Update i18n translation for VMware NSX plugin log messages

All the existing LOG.info, LOG.warning, LOG.error and LOG.critical
messages should use the _LI, _LW, _LE and _LC translation markers,
respectively. Debug-level log messages should not be translated. This
patch set covers the vmware directory under neutron/plugins.

Change-Id: Iba83af988cb2de919b05108f145efb19e9192ae4
Partial-Bug: #1320867
This commit is contained in:
Gary Kotton 2014-11-20 11:53:27 -08:00
parent 9bba05d77f
commit 418556a556
29 changed files with 481 additions and 442 deletions

View File

@ -73,10 +73,11 @@ def _directory_to_check_translation(filename):
"neutron/scheduler", "neutron/scheduler",
"neutron/server", "neutron/server",
"neutron/services", "neutron/services",
"neutron/plugins/cisco",
"neutron/plugins/ml2", "neutron/plugins/ml2",
"neutron/plugins/openvswitch", "neutron/plugins/openvswitch",
"neutron/plugins/linuxbridge", "neutron/plugins/linuxbridge",
"neutron/plugins/cisco"] "neutron/plugins/vmware"]
return any([dir in filename for dir in dirs]) return any([dir in filename for dir in dirs])

View File

@ -21,6 +21,7 @@ import time
from oslo.config import cfg from oslo.config import cfg
from neutron.i18n import _LE, _LI, _LW
from neutron.openstack.common import log as logging from neutron.openstack.common import log as logging
from neutron.plugins.vmware import api_client from neutron.plugins.vmware import api_client
@ -100,14 +101,14 @@ class ApiClientBase(object):
api_providers are configured. api_providers are configured.
''' '''
if not self._api_providers: if not self._api_providers:
LOG.warn(_("[%d] no API providers currently available."), rid) LOG.warn(_LW("[%d] no API providers currently available."), rid)
return None return None
if self._conn_pool.empty(): if self._conn_pool.empty():
LOG.debug(_("[%d] Waiting to acquire API client connection."), rid) LOG.debug("[%d] Waiting to acquire API client connection.", rid)
priority, conn = self._conn_pool.get() priority, conn = self._conn_pool.get()
now = time.time() now = time.time()
if getattr(conn, 'last_used', now) < now - cfg.CONF.conn_idle_timeout: if getattr(conn, 'last_used', now) < now - cfg.CONF.conn_idle_timeout:
LOG.info(_("[%(rid)d] Connection %(conn)s idle for %(sec)0.2f " LOG.info(_LI("[%(rid)d] Connection %(conn)s idle for %(sec)0.2f "
"seconds; reconnecting."), "seconds; reconnecting."),
{'rid': rid, 'conn': api_client.ctrl_conn_to_str(conn), {'rid': rid, 'conn': api_client.ctrl_conn_to_str(conn),
'sec': now - conn.last_used}) 'sec': now - conn.last_used})
@ -116,8 +117,8 @@ class ApiClientBase(object):
conn.last_used = now conn.last_used = now
conn.priority = priority # stash current priority for release conn.priority = priority # stash current priority for release
qsize = self._conn_pool.qsize() qsize = self._conn_pool.qsize()
LOG.debug(_("[%(rid)d] Acquired connection %(conn)s. %(qsize)d " LOG.debug("[%(rid)d] Acquired connection %(conn)s. %(qsize)d "
"connection(s) available."), "connection(s) available.",
{'rid': rid, 'conn': api_client.ctrl_conn_to_str(conn), {'rid': rid, 'conn': api_client.ctrl_conn_to_str(conn),
'qsize': qsize}) 'qsize': qsize})
if auto_login and self.auth_cookie(conn) is None: if auto_login and self.auth_cookie(conn) is None:
@ -137,8 +138,8 @@ class ApiClientBase(object):
''' '''
conn_params = self._conn_params(http_conn) conn_params = self._conn_params(http_conn)
if self._conn_params(http_conn) not in self._api_providers: if self._conn_params(http_conn) not in self._api_providers:
LOG.debug(_("[%(rid)d] Released connection %(conn)s is not an " LOG.debug("[%(rid)d] Released connection %(conn)s is not an "
"API provider for the cluster"), "API provider for the cluster",
{'rid': rid, {'rid': rid,
'conn': api_client.ctrl_conn_to_str(http_conn)}) 'conn': api_client.ctrl_conn_to_str(http_conn)})
return return
@ -148,7 +149,7 @@ class ApiClientBase(object):
priority = http_conn.priority priority = http_conn.priority
if bad_state: if bad_state:
# Reconnect to provider. # Reconnect to provider.
LOG.warn(_("[%(rid)d] Connection returned in bad state, " LOG.warn(_LW("[%(rid)d] Connection returned in bad state, "
"reconnecting to %(conn)s"), "reconnecting to %(conn)s"),
{'rid': rid, {'rid': rid,
'conn': api_client.ctrl_conn_to_str(http_conn)}) 'conn': api_client.ctrl_conn_to_str(http_conn)})
@ -170,8 +171,8 @@ class ApiClientBase(object):
self._next_conn_priority += 1 self._next_conn_priority += 1
self._conn_pool.put((priority, http_conn)) self._conn_pool.put((priority, http_conn))
LOG.debug(_("[%(rid)d] Released connection %(conn)s. %(qsize)d " LOG.debug("[%(rid)d] Released connection %(conn)s. %(qsize)d "
"connection(s) available."), "connection(s) available.",
{'rid': rid, 'conn': api_client.ctrl_conn_to_str(http_conn), {'rid': rid, 'conn': api_client.ctrl_conn_to_str(http_conn),
'qsize': self._conn_pool.qsize()}) 'qsize': self._conn_pool.qsize()})
@ -180,7 +181,7 @@ class ApiClientBase(object):
data = self._get_provider_data(conn) data = self._get_provider_data(conn)
if data is None: if data is None:
LOG.error(_("Login request for an invalid connection: '%s'"), LOG.error(_LE("Login request for an invalid connection: '%s'"),
api_client.ctrl_conn_to_str(conn)) api_client.ctrl_conn_to_str(conn))
return return
provider_sem = data[0] provider_sem = data[0]
@ -191,7 +192,7 @@ class ApiClientBase(object):
finally: finally:
provider_sem.release() provider_sem.release()
else: else:
LOG.debug(_("Waiting for auth to complete")) LOG.debug("Waiting for auth to complete")
# Wait until we can acquire then release # Wait until we can acquire then release
provider_sem.acquire(blocking=True) provider_sem.acquire(blocking=True)
provider_sem.release() provider_sem.release()
@ -233,7 +234,7 @@ class ApiClientBase(object):
""" """
if (not isinstance(conn_or_conn_params, tuple) and if (not isinstance(conn_or_conn_params, tuple) and
not isinstance(conn_or_conn_params, httplib.HTTPConnection)): not isinstance(conn_or_conn_params, httplib.HTTPConnection)):
LOG.debug(_("Invalid conn_params value: '%s'"), LOG.debug("Invalid conn_params value: '%s'",
str(conn_or_conn_params)) str(conn_or_conn_params))
return conn_or_conn_params return conn_or_conn_params
if isinstance(conn_or_conn_params, httplib.HTTPConnection): if isinstance(conn_or_conn_params, httplib.HTTPConnection):

View File

@ -17,6 +17,7 @@
import httplib import httplib
from neutron.i18n import _LE
from neutron.openstack.common import log as logging from neutron.openstack.common import log as logging
from neutron.plugins.vmware.api_client import base from neutron.plugins.vmware.api_client import base
from neutron.plugins.vmware.api_client import eventlet_client from neutron.plugins.vmware.api_client import eventlet_client
@ -86,7 +87,7 @@ class NsxApiClient(eventlet_client.EventletApiClient):
retries=self._retries, redirects=self._redirects) retries=self._retries, redirects=self._redirects)
g.start() g.start()
response = g.join() response = g.join()
LOG.debug(_('Request returns "%s"'), response) LOG.debug('Request returns "%s"', response)
# response is a modified HTTPResponse object or None. # response is a modified HTTPResponse object or None.
# response.read() will not work on response as the underlying library # response.read() will not work on response as the underlying library
@ -99,7 +100,7 @@ class NsxApiClient(eventlet_client.EventletApiClient):
if response is None: if response is None:
# Timeout. # Timeout.
LOG.error(_('Request timed out: %(method)s to %(url)s'), LOG.error(_LE('Request timed out: %(method)s to %(url)s'),
{'method': method, 'url': url}) {'method': method, 'url': url})
raise exception.RequestTimeout() raise exception.RequestTimeout()
@ -110,14 +111,14 @@ class NsxApiClient(eventlet_client.EventletApiClient):
# Fail-fast: Check for exception conditions and raise the # Fail-fast: Check for exception conditions and raise the
# appropriate exceptions for known error codes. # appropriate exceptions for known error codes.
if status in exception.ERROR_MAPPINGS: if status in exception.ERROR_MAPPINGS:
LOG.error(_("Received error code: %s"), status) LOG.error(_LE("Received error code: %s"), status)
LOG.error(_("Server Error Message: %s"), response.body) LOG.error(_LE("Server Error Message: %s"), response.body)
exception.ERROR_MAPPINGS[status](response) exception.ERROR_MAPPINGS[status](response)
# Continue processing for non-error condition. # Continue processing for non-error condition.
if (status != httplib.OK and status != httplib.CREATED if (status != httplib.OK and status != httplib.CREATED
and status != httplib.NO_CONTENT): and status != httplib.NO_CONTENT):
LOG.error(_("%(method)s to %(url)s, unexpected response code: " LOG.error(_LE("%(method)s to %(url)s, unexpected response code: "
"%(status)d (content = '%(body)s')"), "%(status)d (content = '%(body)s')"),
{'method': method, 'url': url, {'method': method, 'url': url,
'status': response.status, 'body': response.body}) 'status': response.status, 'body': response.body})
@ -134,6 +135,6 @@ class NsxApiClient(eventlet_client.EventletApiClient):
# one of the server that responds. # one of the server that responds.
self.request('GET', '/ws.v1/control-cluster/node') self.request('GET', '/ws.v1/control-cluster/node')
if not self._version: if not self._version:
LOG.error(_('Unable to determine NSX version. ' LOG.error(_LE('Unable to determine NSX version. '
'Plugin might not work as expected.')) 'Plugin might not work as expected.'))
return self._version return self._version

View File

@ -20,6 +20,7 @@ import time
import eventlet import eventlet
eventlet.monkey_patch() eventlet.monkey_patch()
from neutron.i18n import _LE
from neutron.openstack.common import log as logging from neutron.openstack.common import log as logging
from neutron.plugins.vmware.api_client import base from neutron.plugins.vmware.api_client import base
from neutron.plugins.vmware.api_client import eventlet_request from neutron.plugins.vmware.api_client import eventlet_request
@ -142,12 +143,12 @@ class EventletApiClient(base.ApiClientBase):
ret = g.join() ret = g.join()
if ret: if ret:
if isinstance(ret, Exception): if isinstance(ret, Exception):
LOG.error(_('Login error "%s"'), ret) LOG.error(_LE('Login error "%s"'), ret)
raise ret raise ret
cookie = ret.getheader("Set-Cookie") cookie = ret.getheader("Set-Cookie")
if cookie: if cookie:
LOG.debug(_("Saving new authentication cookie '%s'"), cookie) LOG.debug("Saving new authentication cookie '%s'", cookie)
return cookie return cookie

View File

@ -20,6 +20,7 @@ import urllib
import eventlet import eventlet
from oslo.serialization import jsonutils from oslo.serialization import jsonutils
from neutron.i18n import _LI, _LW
from neutron.openstack.common import log as logging from neutron.openstack.common import log as logging
from neutron.plugins.vmware.api_client import request from neutron.plugins.vmware.api_client import request
@ -119,7 +120,7 @@ class EventletApiRequest(request.ApiRequest):
with eventlet.timeout.Timeout(self._request_timeout, False): with eventlet.timeout.Timeout(self._request_timeout, False):
return self._handle_request() return self._handle_request()
LOG.info(_('[%d] Request timeout.'), self._rid()) LOG.info(_LI('[%d] Request timeout.'), self._rid())
self._request_error = Exception(_('Request timeout')) self._request_error = Exception(_('Request timeout'))
return None return None
else: else:
@ -146,14 +147,15 @@ class EventletApiRequest(request.ApiRequest):
continue continue
# else fall through to return the error code # else fall through to return the error code
LOG.debug(_("[%(rid)d] Completed request '%(method)s %(url)s'" LOG.debug("[%(rid)d] Completed request '%(method)s %(url)s'"
": %(status)s"), ": %(status)s",
{'rid': self._rid(), 'method': self._method, {'rid': self._rid(), 'method': self._method,
'url': self._url, 'status': req.status}) 'url': self._url, 'status': req.status})
self._request_error = None self._request_error = None
response = req response = req
else: else:
LOG.info(_('[%(rid)d] Error while handling request: %(req)s'), LOG.info(_LI('[%(rid)d] Error while handling request: '
'%(req)s'),
{'rid': self._rid(), 'req': req}) {'rid': self._rid(), 'req': req})
self._request_error = req self._request_error = req
response = None response = None
@ -209,7 +211,7 @@ class GetApiProvidersRequestEventlet(EventletApiRequest):
ret.append(_provider_from_listen_addr(addr)) ret.append(_provider_from_listen_addr(addr))
return ret return ret
except Exception as e: except Exception as e:
LOG.warn(_("[%(rid)d] Failed to parse API provider: %(e)s"), LOG.warn(_LW("[%(rid)d] Failed to parse API provider: %(e)s"),
{'rid': self._rid(), 'e': e}) {'rid': self._rid(), 'e': e})
# intentionally fall through # intentionally fall through
return None return None

View File

@ -25,6 +25,7 @@ from oslo.utils import excutils
import six import six
import six.moves.urllib.parse as urlparse import six.moves.urllib.parse as urlparse
from neutron.i18n import _LI, _LW
from neutron.openstack.common import log as logging from neutron.openstack.common import log as logging
from neutron.plugins.vmware import api_client from neutron.plugins.vmware import api_client
@ -86,8 +87,8 @@ class ApiRequest(object):
return error return error
url = self._url url = self._url
LOG.debug(_("[%(rid)d] Issuing - request url: %(conn)s " LOG.debug("[%(rid)d] Issuing - request url: %(conn)s "
"body: %(body)s"), "body: %(body)s",
{'rid': self._rid(), 'conn': self._request_str(conn, url), {'rid': self._rid(), 'conn': self._request_str(conn, url),
'body': self._body}) 'body': self._body})
issued_time = time.time() issued_time = time.time()
@ -114,13 +115,13 @@ class ApiRequest(object):
gen = self._api_client.config_gen gen = self._api_client.config_gen
if gen: if gen:
headers["X-Nvp-Wait-For-Config-Generation"] = gen headers["X-Nvp-Wait-For-Config-Generation"] = gen
LOG.debug(_("Setting X-Nvp-Wait-For-Config-Generation " LOG.debug("Setting X-Nvp-Wait-For-Config-Generation "
"request header: '%s'"), gen) "request header: '%s'", gen)
try: try:
conn.request(self._method, url, self._body, headers) conn.request(self._method, url, self._body, headers)
except Exception as e: except Exception as e:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.warn(_("[%(rid)d] Exception issuing request: " LOG.warn(_LW("[%(rid)d] Exception issuing request: "
"%(e)s"), "%(e)s"),
{'rid': self._rid(), 'e': e}) {'rid': self._rid(), 'e': e})
@ -128,8 +129,8 @@ class ApiRequest(object):
response.body = response.read() response.body = response.read()
response.headers = response.getheaders() response.headers = response.getheaders()
elapsed_time = time.time() - issued_time elapsed_time = time.time() - issued_time
LOG.debug(_("[%(rid)d] Completed request '%(conn)s': " LOG.debug("[%(rid)d] Completed request '%(conn)s': "
"%(status)s (%(elapsed)s seconds)"), "%(status)s (%(elapsed)s seconds)",
{'rid': self._rid(), {'rid': self._rid(),
'conn': self._request_str(conn, url), 'conn': self._request_str(conn, url),
'status': response.status, 'status': response.status,
@ -137,8 +138,8 @@ class ApiRequest(object):
new_gen = response.getheader('X-Nvp-Config-Generation', None) new_gen = response.getheader('X-Nvp-Config-Generation', None)
if new_gen: if new_gen:
LOG.debug(_("Reading X-Nvp-config-Generation response " LOG.debug("Reading X-Nvp-config-Generation response "
"header: '%s'"), new_gen) "header: '%s'", new_gen)
if (self._api_client.config_gen is None or if (self._api_client.config_gen is None or
self._api_client.config_gen < int(new_gen)): self._api_client.config_gen < int(new_gen)):
self._api_client.config_gen = int(new_gen) self._api_client.config_gen = int(new_gen)
@ -164,7 +165,7 @@ class ApiRequest(object):
httplib.TEMPORARY_REDIRECT]: httplib.TEMPORARY_REDIRECT]:
break break
elif redirects >= self._redirects: elif redirects >= self._redirects:
LOG.info(_("[%d] Maximum redirects exceeded, aborting " LOG.info(_LI("[%d] Maximum redirects exceeded, aborting "
"request"), self._rid()) "request"), self._rid())
break break
redirects += 1 redirects += 1
@ -174,7 +175,7 @@ class ApiRequest(object):
if url is None: if url is None:
response.status = httplib.INTERNAL_SERVER_ERROR response.status = httplib.INTERNAL_SERVER_ERROR
break break
LOG.info(_("[%(rid)d] Redirecting request to: %(conn)s"), LOG.info(_LI("[%(rid)d] Redirecting request to: %(conn)s"),
{'rid': self._rid(), {'rid': self._rid(),
'conn': self._request_str(conn, url)}) 'conn': self._request_str(conn, url)})
# yield here, just in case we are not out of the loop yet # yield here, just in case we are not out of the loop yet
@ -187,7 +188,7 @@ class ApiRequest(object):
# queue. # queue.
if (response.status == httplib.INTERNAL_SERVER_ERROR and if (response.status == httplib.INTERNAL_SERVER_ERROR and
response.status > httplib.NOT_IMPLEMENTED): response.status > httplib.NOT_IMPLEMENTED):
LOG.warn(_("[%(rid)d] Request '%(method)s %(url)s' " LOG.warn(_LW("[%(rid)d] Request '%(method)s %(url)s' "
"received: %(status)s"), "received: %(status)s"),
{'rid': self._rid(), 'method': self._method, {'rid': self._rid(), 'method': self._method,
'url': self._url, 'status': response.status}) 'url': self._url, 'status': response.status})
@ -200,7 +201,7 @@ class ApiRequest(object):
msg = unicode(e) msg = unicode(e)
if response is None: if response is None:
elapsed_time = time.time() - issued_time elapsed_time = time.time() - issued_time
LOG.warn(_("[%(rid)d] Failed request '%(conn)s': '%(msg)s' " LOG.warn(_LW("[%(rid)d] Failed request '%(conn)s': '%(msg)s' "
"(%(elapsed)s seconds)"), "(%(elapsed)s seconds)"),
{'rid': self._rid(), 'conn': self._request_str(conn, url), {'rid': self._rid(), 'conn': self._request_str(conn, url),
'msg': msg, 'elapsed': elapsed_time}) 'msg': msg, 'elapsed': elapsed_time})
@ -234,8 +235,8 @@ class ApiRequest(object):
url = value url = value
break break
if not url: if not url:
LOG.warn(_("[%d] Received redirect status without location header" LOG.warn(_LW("[%d] Received redirect status without location "
" field"), self._rid()) "header field"), self._rid())
return (conn, None) return (conn, None)
# Accept location with the following format: # Accept location with the following format:
# 1. /path, redirect to same node # 1. /path, redirect to same node
@ -251,12 +252,13 @@ class ApiRequest(object):
url = result.path url = result.path
return (conn, url) # case 1 return (conn, url) # case 1
else: else:
LOG.warn(_("[%(rid)d] Received invalid redirect location: " LOG.warn(_LW("[%(rid)d] Received invalid redirect location: "
"'%(url)s'"), {'rid': self._rid(), 'url': url}) "'%(url)s'"), {'rid': self._rid(), 'url': url})
return (conn, None) # case 3 return (conn, None) # case 3
elif result.scheme not in ["http", "https"] or not result.hostname: elif result.scheme not in ["http", "https"] or not result.hostname:
LOG.warn(_("[%(rid)d] Received malformed redirect " LOG.warn(_LW("[%(rid)d] Received malformed redirect "
"location: %(url)s"), {'rid': self._rid(), 'url': url}) "location: %(url)s"),
{'rid': self._rid(), 'url': url})
return (conn, None) # case 3 return (conn, None) # case 3
# case 2, redirect location includes a scheme # case 2, redirect location includes a scheme
# so setup a new connection and authenticate # so setup a new connection and authenticate

View File

@ -15,6 +15,7 @@
# under the License. # under the License.
# #
from neutron.i18n import _LW
from neutron.openstack.common import log as logging from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
@ -27,7 +28,7 @@ def find_version(headers):
if header_name == 'server': if header_name == 'server':
return Version(header_value.split('/')[1]) return Version(header_value.split('/')[1])
except IndexError: except IndexError:
LOG.warning(_("Unable to fetch NSX version from response " LOG.warning(_LW("Unable to fetch NSX version from response "
"headers :%s"), headers) "headers :%s"), headers)

View File

@ -17,6 +17,7 @@ from neutron.api.v2 import attributes as attr
from neutron.common import exceptions as n_exc from neutron.common import exceptions as n_exc
from neutron.extensions import multiprovidernet as mpnet from neutron.extensions import multiprovidernet as mpnet
from neutron.extensions import providernet as pnet from neutron.extensions import providernet as pnet
from neutron.i18n import _LW
from neutron.openstack.common import log from neutron.openstack.common import log
from neutron.plugins.vmware.api_client import client from neutron.plugins.vmware.api_client import client
from neutron.plugins.vmware.api_client import exception as api_exc from neutron.plugins.vmware.api_client import exception as api_exc
@ -64,7 +65,7 @@ def get_nsx_switch_ids(session, cluster, neutron_network_id):
# more than once for each network in Neutron's lifetime # more than once for each network in Neutron's lifetime
nsx_switches = switchlib.get_lswitches(cluster, neutron_network_id) nsx_switches = switchlib.get_lswitches(cluster, neutron_network_id)
if not nsx_switches: if not nsx_switches:
LOG.warn(_("Unable to find NSX switches for Neutron network %s"), LOG.warn(_LW("Unable to find NSX switches for Neutron network %s"),
neutron_network_id) neutron_network_id)
return return
nsx_switch_ids = [] nsx_switch_ids = []
@ -111,7 +112,7 @@ def get_nsx_switch_and_port_id(session, cluster, neutron_port_id):
# NOTE(salv-orlando): Not handling the case where more than one # NOTE(salv-orlando): Not handling the case where more than one
# port is found with the same neutron port tag # port is found with the same neutron port tag
if not nsx_ports: if not nsx_ports:
LOG.warn(_("Unable to find NSX port for Neutron port %s"), LOG.warn(_LW("Unable to find NSX port for Neutron port %s"),
neutron_port_id) neutron_port_id)
# This method is supposed to return a tuple # This method is supposed to return a tuple
return None, None return None, None
@ -151,11 +152,11 @@ def get_nsx_security_group_id(session, cluster, neutron_id):
# NOTE(salv-orlando): Not handling the case where more than one # NOTE(salv-orlando): Not handling the case where more than one
# security profile is found with the same neutron port tag # security profile is found with the same neutron port tag
if not nsx_sec_profiles: if not nsx_sec_profiles:
LOG.warn(_("Unable to find NSX security profile for Neutron " LOG.warn(_LW("Unable to find NSX security profile for Neutron "
"security group %s"), neutron_id) "security group %s"), neutron_id)
return return
elif len(nsx_sec_profiles) > 1: elif len(nsx_sec_profiles) > 1:
LOG.warn(_("Multiple NSX security profiles found for Neutron " LOG.warn(_LW("Multiple NSX security profiles found for Neutron "
"security group %s"), neutron_id) "security group %s"), neutron_id)
nsx_sec_profile = nsx_sec_profiles[0] nsx_sec_profile = nsx_sec_profiles[0]
nsx_id = nsx_sec_profile['uuid'] nsx_id = nsx_sec_profile['uuid']
@ -186,7 +187,7 @@ def get_nsx_router_id(session, cluster, neutron_router_id):
# NOTE(salv-orlando): Not handling the case where more than one # NOTE(salv-orlando): Not handling the case where more than one
# port is found with the same neutron port tag # port is found with the same neutron port tag
if not nsx_routers: if not nsx_routers:
LOG.warn(_("Unable to find NSX router for Neutron router %s"), LOG.warn(_LW("Unable to find NSX router for Neutron router %s"),
neutron_router_id) neutron_router_id)
return return
nsx_router = nsx_routers[0] nsx_router = nsx_routers[0]
@ -243,10 +244,10 @@ def get_nsx_device_statuses(cluster, tenant_id):
except api_exc.NsxApiException: except api_exc.NsxApiException:
# Do not make a NSX API exception fatal # Do not make a NSX API exception fatal
if tenant_id: if tenant_id:
LOG.warn(_("Unable to retrieve operational status for gateway " LOG.warn(_LW("Unable to retrieve operational status for gateway "
"devices belonging to tenant: %s"), tenant_id) "devices belonging to tenant: %s"), tenant_id)
else: else:
LOG.warn(_("Unable to retrieve operational status for " LOG.warn(_LW("Unable to retrieve operational status for "
"gateway devices")) "gateway devices"))

View File

@ -25,6 +25,7 @@ from neutron.db import external_net_db
from neutron.db import l3_db from neutron.db import l3_db
from neutron.db import models_v2 from neutron.db import models_v2
from neutron.extensions import l3 from neutron.extensions import l3
from neutron.i18n import _LE, _LI, _LW
from neutron.openstack.common import log from neutron.openstack.common import log
from neutron.openstack.common import loopingcall from neutron.openstack.common import loopingcall
from neutron.plugins.vmware.api_client import exception as api_exc from neutron.plugins.vmware.api_client import exception as api_exc
@ -262,7 +263,7 @@ class NsxSynchronizer():
# TODO(salv-orlando): We should be catching # TODO(salv-orlando): We should be catching
# api_exc.ResourceNotFound here # api_exc.ResourceNotFound here
# The logical switch was not found # The logical switch was not found
LOG.warning(_("Logical switch for neutron network %s not " LOG.warning(_LW("Logical switch for neutron network %s not "
"found on NSX."), neutron_network_data['id']) "found on NSX."), neutron_network_data['id'])
lswitches = [] lswitches = []
else: else:
@ -297,8 +298,8 @@ class NsxSynchronizer():
pass pass
else: else:
network.status = status network.status = status
LOG.debug(_("Updating status for neutron resource %(q_id)s to:" LOG.debug("Updating status for neutron resource %(q_id)s to:"
" %(status)s"), " %(status)s",
{'q_id': neutron_network_data['id'], {'q_id': neutron_network_data['id'],
'status': status}) 'status': status})
@ -349,7 +350,7 @@ class NsxSynchronizer():
# NOTE(salv-orlando): We should be catching # NOTE(salv-orlando): We should be catching
# api_exc.ResourceNotFound here # api_exc.ResourceNotFound here
# The logical router was not found # The logical router was not found
LOG.warning(_("Logical router for neutron router %s not " LOG.warning(_LW("Logical router for neutron router %s not "
"found on NSX."), neutron_router_data['id']) "found on NSX."), neutron_router_data['id'])
if lrouter: if lrouter:
# Update the cache # Update the cache
@ -379,8 +380,8 @@ class NsxSynchronizer():
pass pass
else: else:
router.status = status router.status = status
LOG.debug(_("Updating status for neutron resource %(q_id)s to:" LOG.debug("Updating status for neutron resource %(q_id)s to:"
" %(status)s"), " %(status)s",
{'q_id': neutron_router_data['id'], {'q_id': neutron_router_data['id'],
'status': status}) 'status': status})
@ -399,7 +400,7 @@ class NsxSynchronizer():
neutron_router_mappings[neutron_router_id] = ( neutron_router_mappings[neutron_router_id] = (
self._nsx_cache[lr_uuid]) self._nsx_cache[lr_uuid])
else: else:
LOG.warn(_("Unable to find Neutron router id for " LOG.warn(_LW("Unable to find Neutron router id for "
"NSX logical router: %s"), lr_uuid) "NSX logical router: %s"), lr_uuid)
# Fetch neutron routers from database # Fetch neutron routers from database
filters = ({} if scan_missing else filters = ({} if scan_missing else
@ -441,7 +442,7 @@ class NsxSynchronizer():
# api_exc.ResourceNotFound here instead # api_exc.ResourceNotFound here instead
# of PortNotFoundOnNetwork when the id exists but # of PortNotFoundOnNetwork when the id exists but
# the logical switch port was not found # the logical switch port was not found
LOG.warning(_("Logical switch port for neutron port %s " LOG.warning(_LW("Logical switch port for neutron port %s "
"not found on NSX."), neutron_port_data['id']) "not found on NSX."), neutron_port_data['id'])
lswitchport = None lswitchport = None
else: else:
@ -474,8 +475,8 @@ class NsxSynchronizer():
pass pass
else: else:
port.status = status port.status = status
LOG.debug(_("Updating status for neutron resource %(q_id)s to:" LOG.debug("Updating status for neutron resource %(q_id)s to:"
" %(status)s"), " %(status)s",
{'q_id': neutron_port_data['id'], {'q_id': neutron_port_data['id'],
'status': status}) 'status': status})
@ -534,7 +535,7 @@ class NsxSynchronizer():
# be emitted. # be emitted.
num_requests = page_size / (MAX_PAGE_SIZE + 1) + 1 num_requests = page_size / (MAX_PAGE_SIZE + 1) + 1
if num_requests > 1: if num_requests > 1:
LOG.warn(_("Requested page size is %(cur_chunk_size)d." LOG.warn(_LW("Requested page size is %(cur_chunk_size)d. "
"It might be necessary to do %(num_requests)d " "It might be necessary to do %(num_requests)d "
"round-trips to NSX for fetching data. Please " "round-trips to NSX for fetching data. Please "
"tune sync parameters to ensure chunk size " "tune sync parameters to ensure chunk size "
@ -567,7 +568,7 @@ class NsxSynchronizer():
def _fetch_nsx_data_chunk(self, sp): def _fetch_nsx_data_chunk(self, sp):
base_chunk_size = sp.chunk_size base_chunk_size = sp.chunk_size
chunk_size = base_chunk_size + sp.extra_chunk_size chunk_size = base_chunk_size + sp.extra_chunk_size
LOG.info(_("Fetching up to %s resources " LOG.info(_LI("Fetching up to %s resources "
"from NSX backend"), chunk_size) "from NSX backend"), chunk_size)
fetched = ls_count = lr_count = lp_count = 0 fetched = ls_count = lr_count = lp_count = 0
lswitches = lrouters = lswitchports = [] lswitches = lrouters = lswitchports = []
@ -587,13 +588,13 @@ class NsxSynchronizer():
# No cursors were provided. Then it must be possible to # No cursors were provided. Then it must be possible to
# calculate the total amount of data to fetch # calculate the total amount of data to fetch
sp.total_size = ls_count + lr_count + lp_count sp.total_size = ls_count + lr_count + lp_count
LOG.debug(_("Total data size: %d"), sp.total_size) LOG.debug("Total data size: %d", sp.total_size)
sp.chunk_size = self._get_chunk_size(sp) sp.chunk_size = self._get_chunk_size(sp)
# Calculate chunk size adjustment # Calculate chunk size adjustment
sp.extra_chunk_size = sp.chunk_size - base_chunk_size sp.extra_chunk_size = sp.chunk_size - base_chunk_size
LOG.debug(_("Fetched %(num_lswitches)d logical switches, " LOG.debug("Fetched %(num_lswitches)d logical switches, "
"%(num_lswitchports)d logical switch ports," "%(num_lswitchports)d logical switch ports,"
"%(num_lrouters)d logical routers"), "%(num_lrouters)d logical routers",
{'num_lswitches': len(lswitches), {'num_lswitches': len(lswitches),
'num_lswitchports': len(lswitchports), 'num_lswitchports': len(lswitchports),
'num_lrouters': len(lrouters)}) 'num_lrouters': len(lrouters)})
@ -607,7 +608,7 @@ class NsxSynchronizer():
# Reset page cursor variables if necessary # Reset page cursor variables if necessary
if sp.current_chunk == 0: if sp.current_chunk == 0:
sp.ls_cursor = sp.lr_cursor = sp.lp_cursor = 'start' sp.ls_cursor = sp.lr_cursor = sp.lp_cursor = 'start'
LOG.info(_("Running state synchronization task. Chunk: %s"), LOG.info(_LI("Running state synchronization task. Chunk: %s"),
sp.current_chunk) sp.current_chunk)
# Fetch chunk_size data from NSX # Fetch chunk_size data from NSX
try: try:
@ -617,18 +618,18 @@ class NsxSynchronizer():
sleep_interval = self._sync_backoff sleep_interval = self._sync_backoff
# Cap max back off to 64 seconds # Cap max back off to 64 seconds
self._sync_backoff = min(self._sync_backoff * 2, 64) self._sync_backoff = min(self._sync_backoff * 2, 64)
LOG.exception(_("An error occurred while communicating with " LOG.exception(_LE("An error occurred while communicating with "
"NSX backend. Will retry synchronization " "NSX backend. Will retry synchronization "
"in %d seconds"), sleep_interval) "in %d seconds"), sleep_interval)
return sleep_interval return sleep_interval
LOG.debug(_("Time elapsed querying NSX: %s"), LOG.debug("Time elapsed querying NSX: %s",
timeutils.utcnow() - start) timeutils.utcnow() - start)
if sp.total_size: if sp.total_size:
num_chunks = ((sp.total_size / sp.chunk_size) + num_chunks = ((sp.total_size / sp.chunk_size) +
(sp.total_size % sp.chunk_size != 0)) (sp.total_size % sp.chunk_size != 0))
else: else:
num_chunks = 1 num_chunks = 1
LOG.debug(_("Number of chunks: %d"), num_chunks) LOG.debug("Number of chunks: %d", num_chunks)
# Find objects which have changed on NSX side and need # Find objects which have changed on NSX side and need
# to be synchronized # to be synchronized
LOG.debug("Processing NSX cache for updated objects") LOG.debug("Processing NSX cache for updated objects")
@ -646,7 +647,7 @@ class NsxSynchronizer():
changed_only=not scan_missing) changed_only=not scan_missing)
lp_uuids = self._nsx_cache.get_lswitchports( lp_uuids = self._nsx_cache.get_lswitchports(
changed_only=not scan_missing) changed_only=not scan_missing)
LOG.debug(_("Time elapsed hashing data: %s"), LOG.debug("Time elapsed hashing data: %s",
timeutils.utcnow() - start) timeutils.utcnow() - start)
# Get an admin context # Get an admin context
ctx = context.get_admin_context() ctx = context.get_admin_context()
@ -658,7 +659,7 @@ class NsxSynchronizer():
self._synchronize_lswitchports(ctx, lp_uuids, self._synchronize_lswitchports(ctx, lp_uuids,
scan_missing=scan_missing) scan_missing=scan_missing)
# Increase chunk counter # Increase chunk counter
LOG.info(_("Synchronization for chunk %(chunk_num)d of " LOG.info(_LI("Synchronization for chunk %(chunk_num)d of "
"%(total_chunks)d performed"), "%(total_chunks)d performed"),
{'chunk_num': sp.current_chunk + 1, {'chunk_num': sp.current_chunk + 1,
'total_chunks': num_chunks}) 'total_chunks': num_chunks})
@ -670,6 +671,6 @@ class NsxSynchronizer():
sp.init_sync_performed = True sp.init_sync_performed = True
# Add additional random delay # Add additional random delay
added_delay = random.randint(0, self._max_rand_delay) added_delay = random.randint(0, self._max_rand_delay)
LOG.debug(_("Time elapsed at end of sync: %s"), LOG.debug("Time elapsed at end of sync: %s",
timeutils.utcnow() - start) timeutils.utcnow() - start)
return self._sync_interval / num_chunks + added_delay return self._sync_interval / num_chunks + added_delay

View File

@ -61,7 +61,7 @@ def device_id_to_vm_id(device_id, obfuscate=False):
def check_and_truncate(display_name): def check_and_truncate(display_name):
if (attributes.is_attr_set(display_name) and if (attributes.is_attr_set(display_name) and
len(display_name) > MAX_DISPLAY_NAME_LEN): len(display_name) > MAX_DISPLAY_NAME_LEN):
LOG.debug(_("Specified name:'%s' exceeds maximum length. " LOG.debug("Specified name:'%s' exceeds maximum length. "
"It will be truncated on NSX"), display_name) "It will be truncated on NSX", display_name)
return display_name[:MAX_DISPLAY_NAME_LEN] return display_name[:MAX_DISPLAY_NAME_LEN]
return display_name or '' return display_name or ''

View File

@ -74,7 +74,7 @@ def add_neutron_nsx_port_mapping(session, neutron_id,
# this should not occur whilst a mapping already exists # this should not occur whilst a mapping already exists
current = get_nsx_switch_and_port_id(session, neutron_id) current = get_nsx_switch_and_port_id(session, neutron_id)
if current[1] == nsx_port_id: if current[1] == nsx_port_id:
LOG.debug(_("Port mapping for %s already available"), LOG.debug("Port mapping for %s already available",
neutron_id) neutron_id)
ctxt.reraise = False ctxt.reraise = False
except db_exc.DBError: except db_exc.DBError:
@ -121,8 +121,8 @@ def get_nsx_switch_and_port_id(session, neutron_id):
one()) one())
return mapping['nsx_switch_id'], mapping['nsx_port_id'] return mapping['nsx_switch_id'], mapping['nsx_port_id']
except exc.NoResultFound: except exc.NoResultFound:
LOG.debug(_("NSX identifiers for neutron port %s not yet " LOG.debug("NSX identifiers for neutron port %s not yet "
"stored in Neutron DB"), neutron_id) "stored in Neutron DB", neutron_id)
return None, None return None, None
@ -132,8 +132,8 @@ def get_nsx_router_id(session, neutron_id):
filter_by(neutron_id=neutron_id).one()) filter_by(neutron_id=neutron_id).one())
return mapping['nsx_id'] return mapping['nsx_id']
except exc.NoResultFound: except exc.NoResultFound:
LOG.debug(_("NSX identifiers for neutron router %s not yet " LOG.debug("NSX identifiers for neutron router %s not yet "
"stored in Neutron DB"), neutron_id) "stored in Neutron DB", neutron_id)
def get_nsx_security_group_id(session, neutron_id): def get_nsx_security_group_id(session, neutron_id):
@ -147,8 +147,8 @@ def get_nsx_security_group_id(session, neutron_id):
one()) one())
return mapping['nsx_id'] return mapping['nsx_id']
except exc.NoResultFound: except exc.NoResultFound:
LOG.debug(_("NSX identifiers for neutron security group %s not yet " LOG.debug("NSX identifiers for neutron security group %s not yet "
"stored in Neutron DB"), neutron_id) "stored in Neutron DB", neutron_id)
return None return None

View File

@ -281,7 +281,7 @@ class NetworkGatewayMixin(networkgw.NetworkGatewayPluginBase):
gw_db.devices.extend([NetworkGatewayDeviceReference(**device) gw_db.devices.extend([NetworkGatewayDeviceReference(**device)
for device in gw_data['devices']]) for device in gw_data['devices']])
context.session.add(gw_db) context.session.add(gw_db)
LOG.debug(_("Created network gateway with id:%s"), gw_db['id']) LOG.debug("Created network gateway with id:%s", gw_db['id'])
return self._make_network_gateway_dict(gw_db) return self._make_network_gateway_dict(gw_db)
def update_network_gateway(self, context, id, network_gateway): def update_network_gateway(self, context, id, network_gateway):
@ -293,7 +293,7 @@ class NetworkGatewayMixin(networkgw.NetworkGatewayPluginBase):
# Ensure there is something to update before doing it # Ensure there is something to update before doing it
if any([gw_db[k] != gw_data[k] for k in gw_data]): if any([gw_db[k] != gw_data[k] for k in gw_data]):
gw_db.update(gw_data) gw_db.update(gw_data)
LOG.debug(_("Updated network gateway with id:%s"), id) LOG.debug("Updated network gateway with id:%s", id)
return self._make_network_gateway_dict(gw_db) return self._make_network_gateway_dict(gw_db)
def get_network_gateway(self, context, id, fields=None): def get_network_gateway(self, context, id, fields=None):
@ -308,7 +308,7 @@ class NetworkGatewayMixin(networkgw.NetworkGatewayPluginBase):
if gw_db.default: if gw_db.default:
raise NetworkGatewayUnchangeable(gateway_id=id) raise NetworkGatewayUnchangeable(gateway_id=id)
context.session.delete(gw_db) context.session.delete(gw_db)
LOG.debug(_("Network gateway '%s' was destroyed."), id) LOG.debug("Network gateway '%s' was destroyed.", id)
def get_network_gateways(self, context, filters=None, fields=None, def get_network_gateways(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None, sorts=None, limit=None, marker=None,
@ -325,8 +325,8 @@ class NetworkGatewayMixin(networkgw.NetworkGatewayPluginBase):
def connect_network(self, context, network_gateway_id, def connect_network(self, context, network_gateway_id,
network_mapping_info): network_mapping_info):
network_id = self._validate_network_mapping_info(network_mapping_info) network_id = self._validate_network_mapping_info(network_mapping_info)
LOG.debug(_("Connecting network '%(network_id)s' to gateway " LOG.debug("Connecting network '%(network_id)s' to gateway "
"'%(network_gateway_id)s'"), "'%(network_gateway_id)s'",
{'network_id': network_id, {'network_id': network_id,
'network_gateway_id': network_gateway_id}) 'network_gateway_id': network_gateway_id})
with context.session.begin(subtransactions=True): with context.session.begin(subtransactions=True):
@ -374,8 +374,8 @@ class NetworkGatewayMixin(networkgw.NetworkGatewayPluginBase):
LOG.error(err_msg) LOG.error(err_msg)
raise exceptions.InvalidInput(error_message=err_msg) raise exceptions.InvalidInput(error_message=err_msg)
port_id = port['id'] port_id = port['id']
LOG.debug(_("Gateway port for '%(network_gateway_id)s' " LOG.debug("Gateway port for '%(network_gateway_id)s' "
"created on network '%(network_id)s':%(port_id)s"), "created on network '%(network_id)s':%(port_id)s",
{'network_gateway_id': network_gateway_id, {'network_gateway_id': network_gateway_id,
'network_id': network_id, 'network_id': network_id,
'port_id': port_id}) 'port_id': port_id})
@ -390,7 +390,7 @@ class NetworkGatewayMixin(networkgw.NetworkGatewayPluginBase):
self._delete_ip_allocation(context, network_id, self._delete_ip_allocation(context, network_id,
fixed_ip['subnet_id'], fixed_ip['subnet_id'],
fixed_ip['ip_address']) fixed_ip['ip_address'])
LOG.debug(_("Ensured no Ip addresses are configured on port %s"), LOG.debug("Ensured no Ip addresses are configured on port %s",
port_id) port_id)
return {'connection_info': return {'connection_info':
{'network_gateway_id': network_gateway_id, {'network_gateway_id': network_gateway_id,
@ -400,8 +400,8 @@ class NetworkGatewayMixin(networkgw.NetworkGatewayPluginBase):
def disconnect_network(self, context, network_gateway_id, def disconnect_network(self, context, network_gateway_id,
network_mapping_info): network_mapping_info):
network_id = self._validate_network_mapping_info(network_mapping_info) network_id = self._validate_network_mapping_info(network_mapping_info)
LOG.debug(_("Disconnecting network '%(network_id)s' from gateway " LOG.debug("Disconnecting network '%(network_id)s' from gateway "
"'%(network_gateway_id)s'"), "'%(network_gateway_id)s'",
{'network_id': network_id, {'network_id': network_id,
'network_gateway_id': network_gateway_id}) 'network_gateway_id': network_gateway_id})
with context.session.begin(subtransactions=True): with context.session.begin(subtransactions=True):
@ -494,7 +494,7 @@ class NetworkGatewayMixin(networkgw.NetworkGatewayPluginBase):
connector_ip=device_data['connector_ip'], connector_ip=device_data['connector_ip'],
status=initial_status) status=initial_status)
context.session.add(device_db) context.session.add(device_db)
LOG.debug(_("Created network gateway device: %s"), device_db['id']) LOG.debug("Created network gateway device: %s", device_db['id'])
return self._make_gateway_device_dict(device_db) return self._make_gateway_device_dict(device_db)
def update_gateway_device(self, context, gateway_device_id, def update_gateway_device(self, context, gateway_device_id,
@ -505,7 +505,7 @@ class NetworkGatewayMixin(networkgw.NetworkGatewayPluginBase):
# Ensure there is something to update before doing it # Ensure there is something to update before doing it
if any([device_db[k] != device_data[k] for k in device_data]): if any([device_db[k] != device_data[k] for k in device_data]):
device_db.update(device_data) device_db.update(device_data)
LOG.debug(_("Updated network gateway device: %s"), LOG.debug("Updated network gateway device: %s",
gateway_device_id) gateway_device_id)
return self._make_gateway_device_dict( return self._make_gateway_device_dict(
device_db, include_nsx_id=include_nsx_id) device_db, include_nsx_id=include_nsx_id)
@ -518,4 +518,4 @@ class NetworkGatewayMixin(networkgw.NetworkGatewayPluginBase):
raise GatewayDeviceInUse(device_id=device_id) raise GatewayDeviceInUse(device_id=device_id)
device_db = self._get_gateway_device(context, device_id) device_db = self._get_gateway_device(context, device_id)
context.session.delete(device_db) context.session.delete(device_db)
LOG.debug(_("Deleted network gateway device: %s."), device_id) LOG.debug("Deleted network gateway device: %s.", device_id)

View File

@ -22,6 +22,7 @@ from neutron.api.v2 import attributes as attr
from neutron.db import db_base_plugin_v2 from neutron.db import db_base_plugin_v2
from neutron.db import model_base from neutron.db import model_base
from neutron.db import models_v2 from neutron.db import models_v2
from neutron.i18n import _LI
from neutron.openstack.common import log from neutron.openstack.common import log
from neutron.openstack.common import uuidutils from neutron.openstack.common import uuidutils
from neutron.plugins.vmware.extensions import qos from neutron.plugins.vmware.extensions import qos
@ -292,7 +293,7 @@ class QoSDbMixin(qos.QueuePluginBase):
if dscp: if dscp:
# must raise because a non-zero dscp was provided # must raise because a non-zero dscp was provided
raise qos.QueueInvalidMarking() raise qos.QueueInvalidMarking()
LOG.info(_("DSCP value (%s) will be ignored with 'trusted' " LOG.info(_LI("DSCP value (%s) will be ignored with 'trusted' "
"marking"), dscp) "marking"), dscp)
max = qos_queue.get('max') max = qos_queue.get('max')
min = qos_queue.get('min') min = qos_queue.get('min')

View File

@ -20,6 +20,7 @@ from oslo.db import exception as db_exc
from oslo.utils import excutils from oslo.utils import excutils
from neutron.common import exceptions as n_exc from neutron.common import exceptions as n_exc
from neutron.i18n import _LE, _LW
from neutron.openstack.common import log as logging from neutron.openstack.common import log as logging
from neutron.plugins.vmware.api_client import exception as api_exc from neutron.plugins.vmware.api_client import exception as api_exc
from neutron.plugins.vmware.common import exceptions as p_exc from neutron.plugins.vmware.common import exceptions as p_exc
@ -67,13 +68,16 @@ class LsnManager(object):
try: try:
return lsn_api.lsn_for_network_get(self.cluster, network_id) return lsn_api.lsn_for_network_get(self.cluster, network_id)
except (n_exc.NotFound, api_exc.NsxApiException): except (n_exc.NotFound, api_exc.NsxApiException):
msg = _('Unable to find Logical Service Node for network %s')
if raise_on_err: if raise_on_err:
LOG.error(msg, network_id) LOG.error(_LE('Unable to find Logical Service Node for '
'network %s.'),
network_id)
raise p_exc.LsnNotFound(entity='network', raise p_exc.LsnNotFound(entity='network',
entity_id=network_id) entity_id=network_id)
else: else:
LOG.warn(msg, network_id) LOG.warn(_LW('Unable to find Logical Service Node for '
'the requested network %s.'),
network_id)
def lsn_create(self, context, network_id): def lsn_create(self, context, network_id):
"""Create a LSN associated to the network.""" """Create a LSN associated to the network."""
@ -88,7 +92,7 @@ class LsnManager(object):
try: try:
lsn_api.lsn_delete(self.cluster, lsn_id) lsn_api.lsn_delete(self.cluster, lsn_id)
except (n_exc.NotFound, api_exc.NsxApiException): except (n_exc.NotFound, api_exc.NsxApiException):
LOG.warn(_('Unable to delete Logical Service Node %s'), lsn_id) LOG.warn(_LW('Unable to delete Logical Service Node %s'), lsn_id)
def lsn_delete_by_network(self, context, network_id): def lsn_delete_by_network(self, context, network_id):
"""Delete a LSN associated to the network.""" """Delete a LSN associated to the network."""
@ -104,15 +108,19 @@ class LsnManager(object):
lsn_port_id = lsn_api.lsn_port_by_subnet_get( lsn_port_id = lsn_api.lsn_port_by_subnet_get(
self.cluster, lsn_id, subnet_id) self.cluster, lsn_id, subnet_id)
except (n_exc.NotFound, api_exc.NsxApiException): except (n_exc.NotFound, api_exc.NsxApiException):
msg = _('Unable to find Logical Service Node Port for '
'LSN %(lsn_id)s and subnet %(subnet_id)s')
if raise_on_err: if raise_on_err:
LOG.error(msg, {'lsn_id': lsn_id, 'subnet_id': subnet_id}) LOG.error(_LE('Unable to find Logical Service Node Port '
'for LSN %(lsn_id)s and subnet '
'%(subnet_id)s'),
{'lsn_id': lsn_id, 'subnet_id': subnet_id})
raise p_exc.LsnPortNotFound(lsn_id=lsn_id, raise p_exc.LsnPortNotFound(lsn_id=lsn_id,
entity='subnet', entity='subnet',
entity_id=subnet_id) entity_id=subnet_id)
else: else:
LOG.warn(msg, {'lsn_id': lsn_id, 'subnet_id': subnet_id}) LOG.warn(_LW('Unable to find Logical Service Node Port '
'for LSN %(lsn_id)s and subnet '
'%(subnet_id)s'),
{'lsn_id': lsn_id, 'subnet_id': subnet_id})
return (lsn_id, None) return (lsn_id, None)
else: else:
return (lsn_id, lsn_port_id) return (lsn_id, lsn_port_id)
@ -127,15 +135,19 @@ class LsnManager(object):
lsn_port_id = lsn_api.lsn_port_by_mac_get( lsn_port_id = lsn_api.lsn_port_by_mac_get(
self.cluster, lsn_id, mac) self.cluster, lsn_id, mac)
except (n_exc.NotFound, api_exc.NsxApiException): except (n_exc.NotFound, api_exc.NsxApiException):
msg = _('Unable to find Logical Service Node Port for '
'LSN %(lsn_id)s and mac address %(mac)s')
if raise_on_err: if raise_on_err:
LOG.error(msg, {'lsn_id': lsn_id, 'mac': mac}) LOG.error(_LE('Unable to find Logical Service Node Port '
'for LSN %(lsn_id)s and mac address '
'%(mac)s'),
{'lsn_id': lsn_id, 'mac': mac})
raise p_exc.LsnPortNotFound(lsn_id=lsn_id, raise p_exc.LsnPortNotFound(lsn_id=lsn_id,
entity='MAC', entity='MAC',
entity_id=mac) entity_id=mac)
else: else:
LOG.warn(msg, {'lsn_id': lsn_id, 'mac': mac}) LOG.warn(_LW('Unable to find Logical Service Node '
'Port for LSN %(lsn_id)s and mac address '
'%(mac)s'),
{'lsn_id': lsn_id, 'mac': mac})
return (lsn_id, None) return (lsn_id, None)
else: else:
return (lsn_id, lsn_port_id) return (lsn_id, lsn_port_id)
@ -157,7 +169,7 @@ class LsnManager(object):
try: try:
lsn_api.lsn_port_delete(self.cluster, lsn_id, lsn_port_id) lsn_api.lsn_port_delete(self.cluster, lsn_id, lsn_port_id)
except (n_exc.NotFound, api_exc.NsxApiException): except (n_exc.NotFound, api_exc.NsxApiException):
LOG.warn(_('Unable to delete LSN Port %s'), lsn_port_id) LOG.warn(_LW('Unable to delete LSN Port %s'), lsn_port_id)
def lsn_port_dispose(self, context, network_id, mac_address): def lsn_port_dispose(self, context, network_id, mac_address):
"""Delete a LSN port given the network and the mac address.""" """Delete a LSN port given the network and the mac address."""
@ -174,10 +186,10 @@ class LsnManager(object):
self.cluster, network_id, lswitch_port_id) self.cluster, network_id, lswitch_port_id)
except (n_exc.PortNotFoundOnNetwork, except (n_exc.PortNotFoundOnNetwork,
api_exc.NsxApiException): api_exc.NsxApiException):
LOG.warn(_("Metadata port not found while attempting " LOG.warn(_LW("Metadata port not found while attempting "
"to delete it from network %s"), network_id) "to delete it from network %s"), network_id)
else: else:
LOG.warn(_("Unable to find Logical Services Node " LOG.warn(_LW("Unable to find Logical Services Node "
"Port with MAC %s"), mac_address) "Port with MAC %s"), mac_address)
def lsn_port_dhcp_setup( def lsn_port_dhcp_setup(
@ -305,7 +317,7 @@ class LsnManager(object):
if lsn_id and lsn_port_id: if lsn_id and lsn_port_id:
hdlr(self.cluster, lsn_id, lsn_port_id, data) hdlr(self.cluster, lsn_id, lsn_port_id, data)
except (n_exc.NotFound, api_exc.NsxApiException): except (n_exc.NotFound, api_exc.NsxApiException):
LOG.error(_('Error while configuring LSN ' LOG.error(_LE('Error while configuring LSN '
'port %s'), lsn_port_id) 'port %s'), lsn_port_id)
raise p_exc.PortConfigurationError( raise p_exc.PortConfigurationError(
net_id=network_id, lsn_id=lsn_id, port_id=lsn_port_id) net_id=network_id, lsn_id=lsn_id, port_id=lsn_port_id)

View File

@ -18,6 +18,7 @@
from neutron.common import constants as const from neutron.common import constants as const
from neutron.common import exceptions as n_exc from neutron.common import exceptions as n_exc
from neutron.extensions import external_net from neutron.extensions import external_net
from neutron.i18n import _LE
from neutron.openstack.common import log as logging from neutron.openstack.common import log as logging
from neutron.plugins.vmware.common import exceptions as p_exc from neutron.plugins.vmware.common import exceptions as p_exc
from neutron.plugins.vmware.dhcp_meta import nsx from neutron.plugins.vmware.dhcp_meta import nsx
@ -78,7 +79,7 @@ class DhcpMetadataBuilder(object):
try: try:
self.plugin.delete_port(context, port['id']) self.plugin.delete_port(context, port['id'])
except n_exc.PortNotFound: except n_exc.PortNotFound:
LOG.error(_('Port %s is already gone'), port['id']) LOG.error(_LE('Port %s is already gone'), port['id'])
def dhcp_allocate(self, context, network_id, subnet): def dhcp_allocate(self, context, network_id, subnet):
"""Allocate dhcp resources for the subnet.""" """Allocate dhcp resources for the subnet."""

View File

@ -24,6 +24,7 @@ from neutron.common import exceptions as n_exc
from neutron.db import db_base_plugin_v2 from neutron.db import db_base_plugin_v2
from neutron.db import l3_db from neutron.db import l3_db
from neutron.extensions import external_net from neutron.extensions import external_net
from neutron.i18n import _LE, _LI
from neutron.openstack.common import log as logging from neutron.openstack.common import log as logging
from neutron.plugins.vmware.common import exceptions as p_exc from neutron.plugins.vmware.common import exceptions as p_exc
from neutron.plugins.vmware.dhcp_meta import constants as d_const from neutron.plugins.vmware.dhcp_meta import constants as d_const
@ -133,12 +134,11 @@ class DhcpAgentNotifyAPI(object):
# down below as well as handle_port_metadata_access # down below as well as handle_port_metadata_access
self.plugin.create_port(context, {'port': dhcp_port}) self.plugin.create_port(context, {'port': dhcp_port})
except p_exc.PortConfigurationError as e: except p_exc.PortConfigurationError as e:
err_msg = (_("Error while creating subnet %(cidr)s for " LOG.error(_LE("Error while creating subnet %(cidr)s for "
"network %(network)s. Please, contact " "network %(network)s. Please, contact "
"administrator") % "administrator"),
{"cidr": subnet["cidr"], {"cidr": subnet["cidr"],
"network": network_id}) "network": network_id})
LOG.error(err_msg)
db_base_plugin_v2.NeutronDbPluginV2.delete_port( db_base_plugin_v2.NeutronDbPluginV2.delete_port(
self.plugin, context, e.port_id) self.plugin, context, e.port_id)
if clean_on_err: if clean_on_err:
@ -203,12 +203,13 @@ def check_services_requirements(cluster):
def handle_network_dhcp_access(plugin, context, network, action): def handle_network_dhcp_access(plugin, context, network, action):
LOG.info(_("Performing DHCP %(action)s for resource: %(resource)s") LOG.info(_LI("Performing DHCP %(action)s for resource: %(resource)s"),
% {"action": action, "resource": network}) {"action": action, "resource": network})
if action == 'create_network': if action == 'create_network':
network_id = network['id'] network_id = network['id']
if network.get(external_net.EXTERNAL): if network.get(external_net.EXTERNAL):
LOG.info(_("Network %s is external: no LSN to create"), network_id) LOG.info(_LI("Network %s is external: no LSN to create"),
network_id)
return return
plugin.lsn_manager.lsn_create(context, network_id) plugin.lsn_manager.lsn_create(context, network_id)
elif action == 'delete_network': elif action == 'delete_network':
@ -216,13 +217,13 @@ def handle_network_dhcp_access(plugin, context, network, action):
# is just the network id # is just the network id
network_id = network network_id = network
plugin.lsn_manager.lsn_delete_by_network(context, network_id) plugin.lsn_manager.lsn_delete_by_network(context, network_id)
LOG.info(_("Logical Services Node for network " LOG.info(_LI("Logical Services Node for network "
"%s configured successfully"), network_id) "%s configured successfully"), network_id)
def handle_port_dhcp_access(plugin, context, port, action): def handle_port_dhcp_access(plugin, context, port, action):
LOG.info(_("Performing DHCP %(action)s for resource: %(resource)s") LOG.info(_LI("Performing DHCP %(action)s for resource: %(resource)s"),
% {"action": action, "resource": port}) {"action": action, "resource": port})
if port["device_owner"] == const.DEVICE_OWNER_DHCP: if port["device_owner"] == const.DEVICE_OWNER_DHCP:
network_id = port["network_id"] network_id = port["network_id"]
if action == "create_port": if action == "create_port":
@ -238,9 +239,8 @@ def handle_port_dhcp_access(plugin, context, port, action):
plugin.lsn_manager.lsn_port_dhcp_setup( plugin.lsn_manager.lsn_port_dhcp_setup(
context, network_id, port['id'], subnet_data, subnet) context, network_id, port['id'], subnet_data, subnet)
except p_exc.PortConfigurationError: except p_exc.PortConfigurationError:
err_msg = (_("Error while configuring DHCP for " LOG.error(_LE("Error while configuring DHCP for "
"port %s"), port['id']) "port %s"), port['id'])
LOG.error(err_msg)
raise n_exc.NeutronException() raise n_exc.NeutronException()
elif action == "delete_port": elif action == "delete_port":
plugin.lsn_manager.lsn_port_dispose(context, network_id, plugin.lsn_manager.lsn_port_dispose(context, network_id,
@ -250,7 +250,7 @@ def handle_port_dhcp_access(plugin, context, port, action):
# do something only if there are IP's and dhcp is enabled # do something only if there are IP's and dhcp is enabled
subnet_id = port["fixed_ips"][0]['subnet_id'] subnet_id = port["fixed_ips"][0]['subnet_id']
if not plugin.get_subnet(context, subnet_id)['enable_dhcp']: if not plugin.get_subnet(context, subnet_id)['enable_dhcp']:
LOG.info(_("DHCP is disabled for subnet %s: nothing " LOG.info(_LI("DHCP is disabled for subnet %s: nothing "
"to do"), subnet_id) "to do"), subnet_id)
return return
host_data = { host_data = {
@ -269,7 +269,7 @@ def handle_port_dhcp_access(plugin, context, port, action):
if action == 'create_port': if action == 'create_port':
db_base_plugin_v2.NeutronDbPluginV2.delete_port( db_base_plugin_v2.NeutronDbPluginV2.delete_port(
plugin, context, port['id']) plugin, context, port['id'])
LOG.info(_("DHCP for port %s configured successfully"), port['id']) LOG.info(_LI("DHCP for port %s configured successfully"), port['id'])
def handle_port_metadata_access(plugin, context, port, is_delete=False): def handle_port_metadata_access(plugin, context, port, is_delete=False):
@ -277,7 +277,8 @@ def handle_port_metadata_access(plugin, context, port, is_delete=False):
network_id = port["network_id"] network_id = port["network_id"]
network = plugin.get_network(context, network_id) network = plugin.get_network(context, network_id)
if network[external_net.EXTERNAL]: if network[external_net.EXTERNAL]:
LOG.info(_("Network %s is external: nothing to do"), network_id) LOG.info(_LI("Network %s is external: nothing to do"),
network_id)
return return
subnet_id = port["fixed_ips"][0]['subnet_id'] subnet_id = port["fixed_ips"][0]['subnet_id']
host_data = { host_data = {
@ -285,7 +286,7 @@ def handle_port_metadata_access(plugin, context, port, is_delete=False):
"tenant_id": port["tenant_id"], "tenant_id": port["tenant_id"],
"ip_address": port["fixed_ips"][0]['ip_address'] "ip_address": port["fixed_ips"][0]['ip_address']
} }
LOG.info(_("Configuring metadata entry for port %s"), port) LOG.info(_LI("Configuring metadata entry for port %s"), port)
if not is_delete: if not is_delete:
handler = plugin.lsn_manager.lsn_port_meta_host_add handler = plugin.lsn_manager.lsn_port_meta_host_add
else: else:
@ -297,12 +298,13 @@ def handle_port_metadata_access(plugin, context, port, is_delete=False):
if not is_delete: if not is_delete:
db_base_plugin_v2.NeutronDbPluginV2.delete_port( db_base_plugin_v2.NeutronDbPluginV2.delete_port(
plugin, context, port['id']) plugin, context, port['id'])
LOG.info(_("Metadata for port %s configured successfully"), port['id']) LOG.info(_LI("Metadata for port %s configured successfully"),
port['id'])
def handle_router_metadata_access(plugin, context, router_id, interface=None): def handle_router_metadata_access(plugin, context, router_id, interface=None):
LOG.info(_("Handle metadata access via router: %(r)s and " LOG.info(_LI("Handle metadata access via router: %(r)s and "
"interface %(i)s") % {'r': router_id, 'i': interface}) "interface %(i)s"), {'r': router_id, 'i': interface})
if interface: if interface:
try: try:
plugin.get_port(context, interface['port_id']) plugin.get_port(context, interface['port_id'])
@ -318,4 +320,4 @@ def handle_router_metadata_access(plugin, context, router_id, interface=None):
if is_enabled: if is_enabled:
l3_db.L3_NAT_db_mixin.remove_router_interface( l3_db.L3_NAT_db_mixin.remove_router_interface(
plugin, context, router_id, interface) plugin, context, router_id, interface)
LOG.info(_("Metadata for router %s handled successfully"), router_id) LOG.info(_LI("Metadata for router %s handled successfully"), router_id)

View File

@ -25,6 +25,7 @@ from neutron.common import exceptions as ntn_exc
from neutron.db import db_base_plugin_v2 from neutron.db import db_base_plugin_v2
from neutron.db import l3_db from neutron.db import l3_db
from neutron.db import models_v2 from neutron.db import models_v2
from neutron.i18n import _LE, _LI, _LW
from neutron.openstack.common import log as logging from neutron.openstack.common import log as logging
from neutron.plugins.vmware.api_client import exception as api_exc from neutron.plugins.vmware.api_client import exception as api_exc
from neutron.plugins.vmware.common import config from neutron.plugins.vmware.common import config
@ -64,8 +65,9 @@ def handle_port_metadata_access(plugin, context, port, is_delete=False):
# route. This is done via the enable_isolated_metadata # route. This is done via the enable_isolated_metadata
# option if desired. # option if desired.
if not subnet.get('gateway_ip'): if not subnet.get('gateway_ip'):
LOG.info(_('Subnet %s does not have a gateway, the metadata ' LOG.info(_LI('Subnet %s does not have a gateway, the '
'route will not be created'), subnet['id']) 'metadata route will not be created'),
subnet['id'])
return return
metadata_routes = [r for r in subnet.routes metadata_routes = [r for r in subnet.routes
if r['destination'] == METADATA_DHCP_ROUTE] if r['destination'] == METADATA_DHCP_ROUTE]
@ -88,10 +90,10 @@ def handle_port_metadata_access(plugin, context, port, is_delete=False):
def handle_router_metadata_access(plugin, context, router_id, interface=None): def handle_router_metadata_access(plugin, context, router_id, interface=None):
if cfg.CONF.NSX.metadata_mode != config.MetadataModes.DIRECT: if cfg.CONF.NSX.metadata_mode != config.MetadataModes.DIRECT:
LOG.debug(_("Metadata access network is disabled")) LOG.debug("Metadata access network is disabled")
return return
if not cfg.CONF.allow_overlapping_ips: if not cfg.CONF.allow_overlapping_ips:
LOG.warn(_("Overlapping IPs must be enabled in order to setup " LOG.warn(_LW("Overlapping IPs must be enabled in order to setup "
"the metadata access network")) "the metadata access network"))
return return
ctx_elevated = context.elevated() ctx_elevated = context.elevated()
@ -111,15 +113,15 @@ def handle_router_metadata_access(plugin, context, router_id, interface=None):
_destroy_metadata_access_network( _destroy_metadata_access_network(
plugin, ctx_elevated, router_id, ports) plugin, ctx_elevated, router_id, ports)
else: else:
LOG.debug(_("No router interface found for router '%s'. " LOG.debug("No router interface found for router '%s'. "
"No metadata access network should be " "No metadata access network should be "
"created or destroyed"), router_id) "created or destroyed", router_id)
# TODO(salvatore-orlando): A better exception handling in the # TODO(salvatore-orlando): A better exception handling in the
# NSX plugin would allow us to improve error handling here # NSX plugin would allow us to improve error handling here
except (ntn_exc.NeutronException, nsx_exc.NsxPluginException, except (ntn_exc.NeutronException, nsx_exc.NsxPluginException,
api_exc.NsxApiException): api_exc.NsxApiException):
# Any exception here should be regarded as non-fatal # Any exception here should be regarded as non-fatal
LOG.exception(_("An error occurred while operating on the " LOG.exception(_LE("An error occurred while operating on the "
"metadata access network for router:'%s'"), "metadata access network for router:'%s'"),
router_id) router_id)

View File

@ -25,6 +25,7 @@ from neutron.common import constants as const
from neutron.common import rpc as n_rpc from neutron.common import rpc as n_rpc
from neutron.common import topics from neutron.common import topics
from neutron.db import agents_db from neutron.db import agents_db
from neutron.i18n import _LW
from neutron.openstack.common import log as logging from neutron.openstack.common import log as logging
from neutron.plugins.vmware.common import config from neutron.plugins.vmware.common import config
from neutron.plugins.vmware.common import exceptions as nsx_exc from neutron.plugins.vmware.common import exceptions as nsx_exc
@ -106,7 +107,7 @@ class DhcpMetadataAccess(object):
# This becomes ineffective, as all new networks creations # This becomes ineffective, as all new networks creations
# are handled by Logical Services Nodes in NSX # are handled by Logical Services Nodes in NSX
cfg.CONF.set_override('network_auto_schedule', False) cfg.CONF.set_override('network_auto_schedule', False)
LOG.warn(_('network_auto_schedule has been disabled')) LOG.warn(_LW('network_auto_schedule has been disabled'))
notifier = combined.DhcpAgentNotifyAPI(self.safe_reference, notifier = combined.DhcpAgentNotifyAPI(self.safe_reference,
lsn_manager) lsn_manager)
self.supported_extension_aliases.append(lsn.EXT_ALIAS) self.supported_extension_aliases.append(lsn.EXT_ALIAS)

View File

@ -15,6 +15,7 @@
from oslo.config import cfg from oslo.config import cfg
from neutron.i18n import _LI
from neutron.openstack.common import log as logging from neutron.openstack.common import log as logging
from neutron.plugins.vmware.common import exceptions from neutron.plugins.vmware.common import exceptions
@ -58,7 +59,7 @@ class NSXCluster(object):
raise exceptions.InvalidClusterConfiguration( raise exceptions.InvalidClusterConfiguration(
invalid_attrs=self._required_attributes) invalid_attrs=self._required_attributes)
if self._important_attributes: if self._important_attributes:
LOG.info(_("The following cluster attributes were " LOG.info(_LI("The following cluster attributes were "
"not specified: %s'"), self._important_attributes) "not specified: %s'"), self._important_attributes)
# The API client will be explicitly created by users of this class # The API client will be explicitly created by users of this class
self.api_client = None self.api_client = None

View File

@ -18,6 +18,7 @@ from oslo.serialization import jsonutils
from oslo.utils import excutils from oslo.utils import excutils
from neutron.common import exceptions as exception from neutron.common import exceptions as exception
from neutron.i18n import _LE, _LI, _LW
from neutron.openstack.common import log from neutron.openstack.common import log
from neutron.plugins.vmware.api_client import exception as api_exc from neutron.plugins.vmware.api_client import exception as api_exc
from neutron.plugins.vmware.common import exceptions as nsx_exc from neutron.plugins.vmware.common import exceptions as nsx_exc
@ -253,7 +254,7 @@ def update_explicit_routes_lrouter(cluster, router_id, routes):
router_id, route) router_id, route)
added_routes.append(uuid) added_routes.append(uuid)
except api_exc.NsxApiException: except api_exc.NsxApiException:
LOG.exception(_('Cannot update NSX routes %(routes)s for ' LOG.exception(_LE('Cannot update NSX routes %(routes)s for '
'router %(router_id)s'), 'router %(router_id)s'),
{'routes': routes, 'router_id': router_id}) {'routes': routes, 'router_id': router_id})
# Roll back to keep NSX in consistent state # Roll back to keep NSX in consistent state
@ -347,8 +348,8 @@ def create_router_lport(cluster, lrouter_uuid, tenant_id, neutron_port_id,
result = nsxlib.do_request(HTTP_POST, path, jsonutils.dumps(lport_obj), result = nsxlib.do_request(HTTP_POST, path, jsonutils.dumps(lport_obj),
cluster=cluster) cluster=cluster)
LOG.debug(_("Created logical port %(lport_uuid)s on " LOG.debug("Created logical port %(lport_uuid)s on "
"logical router %(lrouter_uuid)s"), "logical router %(lrouter_uuid)s",
{'lport_uuid': result['uuid'], {'lport_uuid': result['uuid'],
'lrouter_uuid': lrouter_uuid}) 'lrouter_uuid': lrouter_uuid})
return result return result
@ -375,8 +376,8 @@ def update_router_lport(cluster, lrouter_uuid, lrouter_port_uuid,
result = nsxlib.do_request(HTTP_PUT, path, result = nsxlib.do_request(HTTP_PUT, path,
jsonutils.dumps(lport_obj), jsonutils.dumps(lport_obj),
cluster=cluster) cluster=cluster)
LOG.debug(_("Updated logical port %(lport_uuid)s on " LOG.debug("Updated logical port %(lport_uuid)s on "
"logical router %(lrouter_uuid)s"), "logical router %(lrouter_uuid)s",
{'lport_uuid': lrouter_port_uuid, 'lrouter_uuid': lrouter_uuid}) {'lport_uuid': lrouter_port_uuid, 'lrouter_uuid': lrouter_uuid})
return result return result
@ -386,8 +387,8 @@ def delete_router_lport(cluster, lrouter_uuid, lport_uuid):
path = nsxlib._build_uri_path(LROUTERPORT_RESOURCE, lport_uuid, path = nsxlib._build_uri_path(LROUTERPORT_RESOURCE, lport_uuid,
lrouter_uuid) lrouter_uuid)
nsxlib.do_request(HTTP_DELETE, path, cluster=cluster) nsxlib.do_request(HTTP_DELETE, path, cluster=cluster)
LOG.debug(_("Delete logical router port %(lport_uuid)s on " LOG.debug("Delete logical router port %(lport_uuid)s on "
"logical router %(lrouter_uuid)s"), "logical router %(lrouter_uuid)s",
{'lport_uuid': lport_uuid, {'lport_uuid': lport_uuid,
'lrouter_uuid': lrouter_uuid}) 'lrouter_uuid': lrouter_uuid})
@ -456,7 +457,7 @@ def _create_nat_match_obj(**kwargs):
def _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj): def _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj):
LOG.debug(_("Creating NAT rule: %s"), nat_rule_obj) LOG.debug("Creating NAT rule: %s", nat_rule_obj)
uri = nsxlib._build_uri_path(LROUTERNAT_RESOURCE, uri = nsxlib._build_uri_path(LROUTERNAT_RESOURCE,
parent_resource_id=router_id) parent_resource_id=router_id)
return nsxlib.do_request(HTTP_POST, uri, jsonutils.dumps(nat_rule_obj), return nsxlib.do_request(HTTP_POST, uri, jsonutils.dumps(nat_rule_obj),
@ -471,13 +472,13 @@ def _build_snat_rule_obj(min_src_ip, max_src_ip, nat_match_obj):
def create_lrouter_nosnat_rule_v2(cluster, _router_id, _match_criteria=None): def create_lrouter_nosnat_rule_v2(cluster, _router_id, _match_criteria=None):
LOG.info(_("No SNAT rules cannot be applied as they are not available in " LOG.info(_LI("No SNAT rules cannot be applied as they are not available "
"this version of the NSX platform")) "in this version of the NSX platform"))
def create_lrouter_nodnat_rule_v2(cluster, _router_id, _match_criteria=None): def create_lrouter_nodnat_rule_v2(cluster, _router_id, _match_criteria=None):
LOG.info(_("No DNAT rules cannot be applied as they are not available in " LOG.info(_LI("No DNAT rules cannot be applied as they are not available "
"this version of the NSX platform")) "in this version of the NSX platform"))
def create_lrouter_snat_rule_v2(cluster, router_id, def create_lrouter_snat_rule_v2(cluster, router_id,
@ -577,7 +578,7 @@ def delete_nat_rules_by_match(cluster, router_id, rule_type,
min_rules=min_num_expected, min_rules=min_num_expected,
max_rules=max_num_expected) max_rules=max_num_expected)
else: else:
LOG.warn(_("Found %(actual_rule_num)d matching NAT rules, which " LOG.warn(_LW("Found %(actual_rule_num)d matching NAT rules, which "
"is not in the expected range (%(min_exp_rule_num)d," "is not in the expected range (%(min_exp_rule_num)d,"
"%(max_exp_rule_num)d)"), "%(max_exp_rule_num)d)"),
{'actual_rule_num': num_rules_to_delete, {'actual_rule_num': num_rules_to_delete,

View File

@ -18,6 +18,7 @@ from oslo.utils import excutils
from neutron.common import constants from neutron.common import constants
from neutron.common import exceptions from neutron.common import exceptions
from neutron.i18n import _LW
from neutron.openstack.common import log from neutron.openstack.common import log
from neutron.plugins.vmware.common import utils from neutron.plugins.vmware.common import utils
from neutron.plugins.vmware import nsxlib from neutron.plugins.vmware import nsxlib
@ -92,7 +93,7 @@ def create_security_profile(cluster, tenant_id, neutron_id, security_profile):
{'ethertype': 'IPv6'}]} {'ethertype': 'IPv6'}]}
update_security_group_rules(cluster, rsp['uuid'], rules) update_security_group_rules(cluster, rsp['uuid'], rules)
LOG.debug(_("Created Security Profile: %s"), rsp) LOG.debug("Created Security Profile: %s", rsp)
return rsp return rsp
@ -118,7 +119,7 @@ def update_security_group_rules(cluster, spid, rules):
LOG.error(nsxlib.format_exception("Unknown", e, locals())) LOG.error(nsxlib.format_exception("Unknown", e, locals()))
#FIXME(salvatore-orlando): This should not raise NeutronException #FIXME(salvatore-orlando): This should not raise NeutronException
raise exceptions.NeutronException() raise exceptions.NeutronException()
LOG.debug(_("Updated Security Profile: %s"), rsp) LOG.debug("Updated Security Profile: %s", rsp)
return rsp return rsp
@ -138,5 +139,5 @@ def delete_security_profile(cluster, spid):
except exceptions.NotFound: except exceptions.NotFound:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
# This is not necessarily an error condition # This is not necessarily an error condition
LOG.warn(_("Unable to find security profile %s on NSX backend"), LOG.warn(_LW("Unable to find security profile %s on NSX backend"),
spid) spid)

View File

@ -19,6 +19,7 @@ from oslo.serialization import jsonutils
from neutron.common import constants from neutron.common import constants
from neutron.common import exceptions as exception from neutron.common import exceptions as exception
from neutron.i18n import _LE, _LI, _LW
from neutron.openstack.common import log from neutron.openstack.common import log
from neutron.plugins.vmware.api_client import exception as api_exc from neutron.plugins.vmware.api_client import exception as api_exc
from neutron.plugins.vmware.common import exceptions as nsx_exc from neutron.plugins.vmware.common import exceptions as nsx_exc
@ -127,7 +128,7 @@ def create_lswitch(cluster, neutron_net_id, tenant_id, display_name,
uri = nsxlib._build_uri_path(LSWITCH_RESOURCE) uri = nsxlib._build_uri_path(LSWITCH_RESOURCE)
lswitch = nsxlib.do_request(HTTP_POST, uri, jsonutils.dumps(lswitch_obj), lswitch = nsxlib.do_request(HTTP_POST, uri, jsonutils.dumps(lswitch_obj),
cluster=cluster) cluster=cluster)
LOG.debug(_("Created logical switch: %s"), lswitch['uuid']) LOG.debug("Created logical switch: %s", lswitch['uuid'])
return lswitch return lswitch
@ -147,7 +148,7 @@ def update_lswitch(cluster, lswitch_id, display_name,
return nsxlib.do_request(HTTP_PUT, uri, jsonutils.dumps(lswitch_obj), return nsxlib.do_request(HTTP_PUT, uri, jsonutils.dumps(lswitch_obj),
cluster=cluster) cluster=cluster)
except exception.NotFound as e: except exception.NotFound as e:
LOG.error(_("Network not found, Error: %s"), str(e)) LOG.error(_LE("Network not found, Error: %s"), str(e))
raise exception.NetworkNotFound(net_id=lswitch_id) raise exception.NetworkNotFound(net_id=lswitch_id)
@ -162,7 +163,7 @@ def delete_networks(cluster, net_id, lswitch_ids):
try: try:
nsxlib.do_request(HTTP_DELETE, path, cluster=cluster) nsxlib.do_request(HTTP_DELETE, path, cluster=cluster)
except exception.NotFound as e: except exception.NotFound as e:
LOG.error(_("Network not found, Error: %s"), str(e)) LOG.error(_LE("Network not found, Error: %s"), str(e))
raise exception.NetworkNotFound(net_id=ls_id) raise exception.NetworkNotFound(net_id=ls_id)
@ -185,7 +186,7 @@ def delete_port(cluster, switch, port):
try: try:
nsxlib.do_request(HTTP_DELETE, uri, cluster=cluster) nsxlib.do_request(HTTP_DELETE, uri, cluster=cluster)
except exception.NotFound: except exception.NotFound:
LOG.exception(_("Port or Network not found")) LOG.exception(_LE("Port or Network not found"))
raise exception.PortNotFoundOnNetwork( raise exception.PortNotFoundOnNetwork(
net_id=switch, port_id=port) net_id=switch, port_id=port)
except api_exc.NsxApiException: except api_exc.NsxApiException:
@ -244,7 +245,7 @@ def get_ports(cluster, networks=None, devices=None, tenants=None):
if not ports: if not ports:
ports = nsxlib.get_all_query_pages(lport_query_path, cluster) ports = nsxlib.get_all_query_pages(lport_query_path, cluster)
except exception.NotFound: except exception.NotFound:
LOG.warn(_("Lswitch %s not found in NSX"), lswitch) LOG.warn(_LW("Lswitch %s not found in NSX"), lswitch)
ports = None ports = None
if ports: if ports:
@ -270,15 +271,15 @@ def get_port_by_neutron_tag(cluster, lswitch_uuid, neutron_port_id):
fields='uuid', fields='uuid',
filters={'tag': neutron_port_id, filters={'tag': neutron_port_id,
'tag_scope': 'q_port_id'}) 'tag_scope': 'q_port_id'})
LOG.debug(_("Looking for port with q_port_id tag '%(neutron_port_id)s' " LOG.debug("Looking for port with q_port_id tag '%(neutron_port_id)s' "
"on: '%(lswitch_uuid)s'"), "on: '%(lswitch_uuid)s'",
{'neutron_port_id': neutron_port_id, {'neutron_port_id': neutron_port_id,
'lswitch_uuid': lswitch_uuid}) 'lswitch_uuid': lswitch_uuid})
res = nsxlib.do_request(HTTP_GET, uri, cluster=cluster) res = nsxlib.do_request(HTTP_GET, uri, cluster=cluster)
num_results = len(res["results"]) num_results = len(res["results"])
if num_results >= 1: if num_results >= 1:
if num_results > 1: if num_results > 1:
LOG.warn(_("Found '%(num_ports)d' ports with " LOG.warn(_LW("Found '%(num_ports)d' ports with "
"q_port_id tag: '%(neutron_port_id)s'. " "q_port_id tag: '%(neutron_port_id)s'. "
"Only 1 was expected."), "Only 1 was expected."),
{'num_ports': num_results, {'num_ports': num_results,
@ -287,7 +288,7 @@ def get_port_by_neutron_tag(cluster, lswitch_uuid, neutron_port_id):
def get_port(cluster, network, port, relations=None): def get_port(cluster, network, port, relations=None):
LOG.info(_("get_port() %(network)s %(port)s"), LOG.info(_LI("get_port() %(network)s %(port)s"),
{'network': network, 'port': port}) {'network': network, 'port': port})
uri = "/ws.v1/lswitch/" + network + "/lport/" + port + "?" uri = "/ws.v1/lswitch/" + network + "/lport/" + port + "?"
if relations: if relations:
@ -295,7 +296,7 @@ def get_port(cluster, network, port, relations=None):
try: try:
return nsxlib.do_request(HTTP_GET, uri, cluster=cluster) return nsxlib.do_request(HTTP_GET, uri, cluster=cluster)
except exception.NotFound as e: except exception.NotFound as e:
LOG.error(_("Port or Network not found, Error: %s"), str(e)) LOG.error(_LE("Port or Network not found, Error: %s"), str(e))
raise exception.PortNotFoundOnNetwork( raise exception.PortNotFoundOnNetwork(
port_id=port, net_id=network) port_id=port, net_id=network)
@ -321,12 +322,12 @@ def update_port(cluster, lswitch_uuid, lport_uuid, neutron_port_id, tenant_id,
try: try:
result = nsxlib.do_request(HTTP_PUT, path, jsonutils.dumps(lport_obj), result = nsxlib.do_request(HTTP_PUT, path, jsonutils.dumps(lport_obj),
cluster=cluster) cluster=cluster)
LOG.debug(_("Updated logical port %(result)s " LOG.debug("Updated logical port %(result)s "
"on logical switch %(uuid)s"), "on logical switch %(uuid)s",
{'result': result['uuid'], 'uuid': lswitch_uuid}) {'result': result['uuid'], 'uuid': lswitch_uuid})
return result return result
except exception.NotFound as e: except exception.NotFound as e:
LOG.error(_("Port or Network not found, Error: %s"), str(e)) LOG.error(_LE("Port or Network not found, Error: %s"), str(e))
raise exception.PortNotFoundOnNetwork( raise exception.PortNotFoundOnNetwork(
port_id=lport_uuid, net_id=lswitch_uuid) port_id=lport_uuid, net_id=lswitch_uuid)
@ -356,7 +357,7 @@ def create_lport(cluster, lswitch_uuid, tenant_id, neutron_port_id,
result = nsxlib.do_request(HTTP_POST, path, jsonutils.dumps(lport_obj), result = nsxlib.do_request(HTTP_POST, path, jsonutils.dumps(lport_obj),
cluster=cluster) cluster=cluster)
LOG.debug(_("Created logical port %(result)s on logical switch %(uuid)s"), LOG.debug("Created logical port %(result)s on logical switch %(uuid)s",
{'result': result['uuid'], 'uuid': lswitch_uuid}) {'result': result['uuid'], 'uuid': lswitch_uuid})
return result return result
@ -368,7 +369,7 @@ def get_port_status(cluster, lswitch_id, port_id):
"/ws.v1/lswitch/%s/lport/%s/status" % "/ws.v1/lswitch/%s/lport/%s/status" %
(lswitch_id, port_id), cluster=cluster) (lswitch_id, port_id), cluster=cluster)
except exception.NotFound as e: except exception.NotFound as e:
LOG.error(_("Port not found, Error: %s"), str(e)) LOG.error(_LE("Port not found, Error: %s"), str(e))
raise exception.PortNotFoundOnNetwork( raise exception.PortNotFoundOnNetwork(
port_id=port_id, net_id=lswitch_id) port_id=port_id, net_id=lswitch_id)
if r['link_status_up'] is True: if r['link_status_up'] is True:

View File

@ -51,7 +51,7 @@ from neutron.extensions import portbindings as pbin
from neutron.extensions import portsecurity as psec from neutron.extensions import portsecurity as psec
from neutron.extensions import providernet as pnet from neutron.extensions import providernet as pnet
from neutron.extensions import securitygroup as ext_sg from neutron.extensions import securitygroup as ext_sg
from neutron.i18n import _LE from neutron.i18n import _LE, _LI, _LW
from neutron.openstack.common import lockutils from neutron.openstack.common import lockutils
from neutron.openstack.common import log as logging from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as plugin_const from neutron.plugins.common import constants as plugin_const
@ -209,7 +209,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
self._is_default_net_gw_in_sync = True self._is_default_net_gw_in_sync = True
except Exception: except Exception:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Unable to process default l2 gw service:%s"), LOG.exception(_LE("Unable to process default l2 gw service: "
"%s"),
def_l2_gw_uuid) def_l2_gw_uuid)
def _build_ip_address_list(self, context, fixed_ips, subnet_ids=None): def _build_ip_address_list(self, context, fixed_ips, subnet_ids=None):
@ -246,7 +247,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
port_data.get('mac_address')) port_data.get('mac_address'))
LOG.debug("Created NSX router port:%s", lrouter_port['uuid']) LOG.debug("Created NSX router port:%s", lrouter_port['uuid'])
except api_exc.NsxApiException: except api_exc.NsxApiException:
LOG.exception(_("Unable to create port on NSX logical router %s"), LOG.exception(_LE("Unable to create port on NSX logical router "
"%s"),
nsx_router_id) nsx_router_id)
raise nsx_exc.NsxPluginException( raise nsx_exc.NsxPluginException(
err_msg=_("Unable to create logical router port for neutron " err_msg=_("Unable to create logical router port for neutron "
@ -334,7 +336,7 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
# Must remove NSX logical port # Must remove NSX logical port
routerlib.delete_router_lport(cluster, nsx_router_id, routerlib.delete_router_lport(cluster, nsx_router_id,
nsx_router_port_id) nsx_router_port_id)
LOG.exception(_("Unable to plug attachment in NSX logical " LOG.exception(_LE("Unable to plug attachment in NSX logical "
"router port %(r_port_id)s, associated with " "router port %(r_port_id)s, associated with "
"Neutron %(q_port_id)s"), "Neutron %(q_port_id)s"),
{'r_port_id': nsx_router_port_id, {'r_port_id': nsx_router_port_id,
@ -426,9 +428,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
# rollback the neutron-nsx port mapping # rollback the neutron-nsx port mapping
nsx_db.delete_neutron_nsx_port_mapping(context.session, nsx_db.delete_neutron_nsx_port_mapping(context.session,
port_id) port_id)
msg = (_("An exception occurred while creating the " LOG.exception(_LE("An exception occurred while creating the "
"neutron port %s on the NSX plaform") % port_id) "neutron port %s on the NSX plaform"), port_id)
LOG.exception(msg)
def _nsx_create_port(self, context, port_data): def _nsx_create_port(self, context, port_data):
"""Driver for creating a logical switch port on NSX platform.""" """Driver for creating a logical switch port on NSX platform."""
@ -438,7 +439,7 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
# However, in order to not break unit tests, we need to still create # However, in order to not break unit tests, we need to still create
# the DB object and return success # the DB object and return success
if self._network_is_external(context, port_data['network_id']): if self._network_is_external(context, port_data['network_id']):
LOG.info(_("NSX plugin does not support regular VIF ports on " LOG.info(_LI("NSX plugin does not support regular VIF ports on "
"external networks. Port %s will be down."), "external networks. Port %s will be down."),
port_data['network_id']) port_data['network_id'])
# No need to actually update the DB state - the default is down # No need to actually update the DB state - the default is down
@ -470,12 +471,12 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
except db_exc.DBError as e: except db_exc.DBError as e:
if (port_data['device_owner'] == constants.DEVICE_OWNER_DHCP and if (port_data['device_owner'] == constants.DEVICE_OWNER_DHCP and
isinstance(e.inner_exception, sql_exc.IntegrityError)): isinstance(e.inner_exception, sql_exc.IntegrityError)):
msg = (_("Concurrent network deletion detected; Back-end Port " LOG.warning(
"%(nsx_id)s creation to be rolled back for Neutron " _LW("Concurrent network deletion detected; Back-end "
"port: %(neutron_id)s") "Port %(nsx_id)s creation to be rolled back for "
% {'nsx_id': lport['uuid'], "Neutron port: %(neutron_id)s"),
{'nsx_id': lport['uuid'],
'neutron_id': port_data['id']}) 'neutron_id': port_data['id']})
LOG.warning(msg)
if selected_lswitch and lport: if selected_lswitch and lport:
try: try:
switchlib.delete_port(self.cluster, switchlib.delete_port(self.cluster,
@ -490,7 +491,7 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
# does not make sense. However we cannot raise as this would break # does not make sense. However we cannot raise as this would break
# unit tests. # unit tests.
if self._network_is_external(context, port_data['network_id']): if self._network_is_external(context, port_data['network_id']):
LOG.info(_("NSX plugin does not support regular VIF ports on " LOG.info(_LI("NSX plugin does not support regular VIF ports on "
"external networks. Port %s will be down."), "external networks. Port %s will be down."),
port_data['network_id']) port_data['network_id'])
return return
@ -509,7 +510,7 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
{'port_id': port_data['id'], {'port_id': port_data['id'],
'net_id': port_data['network_id']}) 'net_id': port_data['network_id']})
except n_exc.NotFound: except n_exc.NotFound:
LOG.warning(_("Port %s not found in NSX"), port_data['id']) LOG.warning(_LW("Port %s not found in NSX"), port_data['id'])
def _nsx_delete_router_port(self, context, port_data): def _nsx_delete_router_port(self, context, port_data):
# Delete logical router port # Delete logical router port
@ -518,7 +519,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id( nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id(
context.session, self.cluster, port_data['id']) context.session, self.cluster, port_data['id'])
if not nsx_port_id: if not nsx_port_id:
LOG.warn(_("Neutron port %(port_id)s not found on NSX backend. " LOG.warn(
_LW("Neutron port %(port_id)s not found on NSX backend. "
"Terminating delete operation. A dangling router port " "Terminating delete operation. A dangling router port "
"might have been left on router %(router_id)s"), "might have been left on router %(router_id)s"),
{'port_id': port_data['id'], {'port_id': port_data['id'],
@ -533,7 +535,7 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
# Do not raise because the issue might as well be that the # Do not raise because the issue might as well be that the
# router has already been deleted, so there would be nothing # router has already been deleted, so there would be nothing
# to do here # to do here
LOG.exception(_("Ignoring exception as this means the peer " LOG.exception(_LE("Ignoring exception as this means the peer "
"for port '%s' has already been deleted."), "for port '%s' has already been deleted."),
nsx_port_id) nsx_port_id)
@ -688,7 +690,7 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
# However, in order to not break unit tests, we need to still create # However, in order to not break unit tests, we need to still create
# the DB object and return success # the DB object and return success
if self._network_is_external(context, port_data['network_id']): if self._network_is_external(context, port_data['network_id']):
LOG.info(_("NSX plugin does not support regular VIF ports on " LOG.info(_LI("NSX plugin does not support regular VIF ports on "
"external networks. Port %s will be down."), "external networks. Port %s will be down."),
port_data['network_id']) port_data['network_id'])
# No need to actually update the DB state - the default is down # No need to actually update the DB state - the default is down
@ -887,7 +889,7 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
context.session, network.id, selected_lswitch['uuid']) context.session, network.id, selected_lswitch['uuid'])
return selected_lswitch return selected_lswitch
else: else:
LOG.error(_("Maximum number of logical ports reached for " LOG.error(_LE("Maximum number of logical ports reached for "
"logical network %s"), network.id) "logical network %s"), network.id)
raise nsx_exc.NoMorePortsException(network=network.id) raise nsx_exc.NoMorePortsException(network=network.id)
@ -933,7 +935,7 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
net_data[key] = None net_data[key] = None
# FIXME(arosen) implement admin_state_up = False in NSX # FIXME(arosen) implement admin_state_up = False in NSX
if net_data['admin_state_up'] is False: if net_data['admin_state_up'] is False:
LOG.warning(_("Network with admin_state_up=False are not yet " LOG.warning(_LW("Network with admin_state_up=False are not yet "
"supported by this plugin. Ignoring setting for " "supported by this plugin. Ignoring setting for "
"network %s"), net_data.get('name', '<unknown>')) "network %s"), net_data.get('name', '<unknown>'))
transport_zone_config = self._convert_to_nsx_transport_zones( transport_zone_config = self._convert_to_nsx_transport_zones(
@ -1011,8 +1013,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
try: try:
switchlib.delete_networks(self.cluster, id, lswitch_ids) switchlib.delete_networks(self.cluster, id, lswitch_ids)
except n_exc.NotFound: except n_exc.NotFound:
LOG.warning(_("The following logical switches were not found " LOG.warning(_LW("The following logical switches were not "
"on the NSX backend:%s"), lswitch_ids) "found on the NSX backend:%s"), lswitch_ids)
self.handle_network_dhcp_access(context, id, action='delete_network') self.handle_network_dhcp_access(context, id, action='delete_network')
LOG.debug("Delete network complete for network: %s", id) LOG.debug("Delete network complete for network: %s", id)
@ -1068,14 +1070,14 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
nsx_switch_ids = nsx_utils.get_nsx_switch_ids( nsx_switch_ids = nsx_utils.get_nsx_switch_ids(
context.session, self.cluster, id) context.session, self.cluster, id)
if not nsx_switch_ids or len(nsx_switch_ids) < 1: if not nsx_switch_ids or len(nsx_switch_ids) < 1:
LOG.warn(_("Unable to find NSX mappings for neutron " LOG.warn(_LW("Unable to find NSX mappings for neutron "
"network:%s"), id) "network:%s"), id)
try: try:
switchlib.update_lswitch(self.cluster, switchlib.update_lswitch(self.cluster,
nsx_switch_ids[0], nsx_switch_ids[0],
network['network']['name']) network['network']['name'])
except api_exc.NsxApiException as e: except api_exc.NsxApiException as e:
LOG.warn(_("Logical switch update on NSX backend failed. " LOG.warn(_LW("Logical switch update on NSX backend failed. "
"Neutron network id:%(net_id)s; " "Neutron network id:%(net_id)s; "
"NSX lswitch id:%(lswitch_id)s;" "NSX lswitch id:%(lswitch_id)s;"
"Error:%(error)s"), "Error:%(error)s"),
@ -1155,7 +1157,7 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
LOG.debug("port created on NSX backend for tenant " LOG.debug("port created on NSX backend for tenant "
"%(tenant_id)s: (%(id)s)", port_data) "%(tenant_id)s: (%(id)s)", port_data)
except n_exc.NotFound: except n_exc.NotFound:
LOG.warning(_("Logical switch for network %s was not " LOG.warning(_LW("Logical switch for network %s was not "
"found in NSX."), port_data['network_id']) "found in NSX."), port_data['network_id'])
# Put port in error on neutron DB # Put port in error on neutron DB
with context.session.begin(subtransactions=True): with context.session.begin(subtransactions=True):
@ -1166,7 +1168,7 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
except Exception: except Exception:
# Port must be removed from neutron DB # Port must be removed from neutron DB
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.error(_("Unable to create port or set port " LOG.error(_LE("Unable to create port or set port "
"attachment in NSX.")) "attachment in NSX."))
with context.session.begin(subtransactions=True): with context.session.begin(subtransactions=True):
self._delete_port(context, neutron_port_id) self._delete_port(context, neutron_port_id)
@ -1289,7 +1291,7 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
# FIXME(arosen) improve exception handling. # FIXME(arosen) improve exception handling.
except Exception: except Exception:
ret_port['status'] = constants.PORT_STATUS_ERROR ret_port['status'] = constants.PORT_STATUS_ERROR
LOG.exception(_("Unable to update port id: %s."), LOG.exception(_LE("Unable to update port id: %s."),
nsx_port_id) nsx_port_id)
# If nsx_port_id is not in database or in nsx put in error state. # If nsx_port_id is not in database or in nsx put in error state.
@ -1389,9 +1391,10 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
"L3GatewayAttachment", "L3GatewayAttachment",
self.cluster.default_l3_gw_service_uuid) self.cluster.default_l3_gw_service_uuid)
except nsx_exc.NsxPluginException: except nsx_exc.NsxPluginException:
LOG.exception(_("Unable to create L3GW port on logical router " LOG.exception(_LE("Unable to create L3GW port on logical router "
"%(router_uuid)s. Verify Default Layer-3 Gateway " "%(router_uuid)s. Verify Default Layer-3 "
"service %(def_l3_gw_svc)s id is correct"), "Gateway service %(def_l3_gw_svc)s id is "
"correct"),
{'router_uuid': lrouter['uuid'], {'router_uuid': lrouter['uuid'],
'def_l3_gw_svc': 'def_l3_gw_svc':
self.cluster.default_l3_gw_service_uuid}) self.cluster.default_l3_gw_service_uuid})
@ -1477,10 +1480,10 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
# As setting gateway failed, the router must be deleted # As setting gateway failed, the router must be deleted
# in order to ensure atomicity # in order to ensure atomicity
router_id = router_db['id'] router_id = router_db['id']
LOG.warn(_("Failed to set gateway info for router being " LOG.warn(_LW("Failed to set gateway info for router being "
"created:%s - removing router"), router_id) "created:%s - removing router"), router_id)
self.delete_router(context, router_id) self.delete_router(context, router_id)
LOG.info(_("Create router failed while setting external " LOG.info(_LI("Create router failed while setting external "
"gateway. Router:%s has been removed from " "gateway. Router:%s has been removed from "
"DB and backend"), "DB and backend"),
router_id) router_id)
@ -1601,7 +1604,7 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
self._delete_lrouter(context, router_id, nsx_router_id) self._delete_lrouter(context, router_id, nsx_router_id)
except n_exc.NotFound: except n_exc.NotFound:
# This is not a fatal error, but needs to be logged # This is not a fatal error, but needs to be logged
LOG.warning(_("Logical router '%s' not found " LOG.warning(_LW("Logical router '%s' not found "
"on NSX Platform"), router_id) "on NSX Platform"), router_id)
except api_exc.NsxApiException: except api_exc.NsxApiException:
raise nsx_exc.NsxPluginException( raise nsx_exc.NsxPluginException(
@ -1615,7 +1618,7 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
context.session, router_id) context.session, router_id)
except db_exc.DBError as d_exc: except db_exc.DBError as d_exc:
# Do not make this error fatal # Do not make this error fatal
LOG.warn(_("Unable to remove NSX mapping for Neutron router " LOG.warn(_LW("Unable to remove NSX mapping for Neutron router "
"%(router_id)s because of the following exception:" "%(router_id)s because of the following exception:"
"%(d_exc)s"), {'router_id': router_id, "%(d_exc)s"), {'router_id': router_id,
'd_exc': str(d_exc)}) 'd_exc': str(d_exc)})
@ -1753,8 +1756,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
raise_on_len_mismatch=False, raise_on_len_mismatch=False,
destination_ip_addresses=subnet['cidr']) destination_ip_addresses=subnet['cidr'])
except n_exc.NotFound: except n_exc.NotFound:
LOG.error(_("Logical router resource %s not found " LOG.error(_LE("Logical router resource %s not found "
"on NSX platform") % router_id) "on NSX platform"), router_id)
except api_exc.NsxApiException: except api_exc.NsxApiException:
raise nsx_exc.NsxPluginException( raise nsx_exc.NsxPluginException(
err_msg=(_("Unable to update logical router" err_msg=(_("Unable to update logical router"
@ -1789,12 +1792,12 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
except api_exc.NsxApiException: except api_exc.NsxApiException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("An error occurred while removing NAT rules " LOG.exception(_LE("An error occurred while removing NAT rules "
"on the NSX platform for floating ip:%s"), "on the NSX platform for floating ip:%s"),
floating_ip_address) floating_ip_address)
except nsx_exc.NatRuleMismatch: except nsx_exc.NatRuleMismatch:
# Do not surface to the user # Do not surface to the user
LOG.warning(_("An incorrect number of matching NAT rules " LOG.warning(_LW("An incorrect number of matching NAT rules "
"was found on the NSX platform")) "was found on the NSX platform"))
def _remove_floatingip_address(self, context, fip_db): def _remove_floatingip_address(self, context, fip_db):
@ -1936,7 +1939,7 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
self.cluster, nsx_router_id, nsx_gw_port_id, self.cluster, nsx_router_id, nsx_gw_port_id,
ips_to_add=nsx_floating_ips, ips_to_remove=[]) ips_to_add=nsx_floating_ips, ips_to_remove=[])
except api_exc.NsxApiException: except api_exc.NsxApiException:
LOG.exception(_("An error occurred while creating NAT " LOG.exception(_LE("An error occurred while creating NAT "
"rules on the NSX platform for floating " "rules on the NSX platform for floating "
"ip:%(floating_ip)s mapped to " "ip:%(floating_ip)s mapped to "
"internal ip:%(internal_ip)s"), "internal ip:%(internal_ip)s"),
@ -1984,7 +1987,7 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
LOG.debug("The port '%s' is not associated with floating IPs", LOG.debug("The port '%s' is not associated with floating IPs",
port_id) port_id)
except n_exc.NotFound: except n_exc.NotFound:
LOG.warning(_("Nat rules not found in nsx for port: %s"), id) LOG.warning(_LW("Nat rules not found in nsx for port: %s"), id)
# NOTE(ihrachys): L3 agent notifications don't make sense for # NOTE(ihrachys): L3 agent notifications don't make sense for
# NSX VMWare plugin since there is no L3 agent in such setup, so # NSX VMWare plugin since there is no L3 agent in such setup, so
@ -2048,7 +2051,7 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
except api_exc.ResourceNotFound: except api_exc.ResourceNotFound:
# Do not cause a 500 to be returned to the user if # Do not cause a 500 to be returned to the user if
# the corresponding NSX resource does not exist # the corresponding NSX resource does not exist
LOG.exception(_("Unable to remove gateway service from " LOG.exception(_LE("Unable to remove gateway service from "
"NSX plaform - the resource was not found")) "NSX plaform - the resource was not found"))
def get_network_gateway(self, context, id, fields=None): def get_network_gateway(self, context, id, fields=None):
@ -2077,7 +2080,7 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
except api_exc.NsxApiException: except api_exc.NsxApiException:
# Consider backend failures as non-fatal, but still warn # Consider backend failures as non-fatal, but still warn
# because this might indicate something dodgy is going on # because this might indicate something dodgy is going on
LOG.warn(_("Unable to update name on NSX backend " LOG.warn(_LW("Unable to update name on NSX backend "
"for network gateway: %s"), id) "for network gateway: %s"), id)
return super(NsxPluginV2, self).update_network_gateway( return super(NsxPluginV2, self).update_network_gateway(
context, id, network_gateway) context, id, network_gateway)
@ -2284,14 +2287,14 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
try: try:
l2gwlib.delete_gateway_device(self.cluster, nsx_device_id) l2gwlib.delete_gateway_device(self.cluster, nsx_device_id)
except n_exc.NotFound: except n_exc.NotFound:
LOG.warn(_("Removal of gateway device: %(neutron_id)s failed on " LOG.warn(_LW("Removal of gateway device: %(neutron_id)s failed on "
"NSX backend (NSX id:%(nsx_id)s) because the NSX " "NSX backend (NSX id:%(nsx_id)s) because the NSX "
"resource was not found"), "resource was not found"),
{'neutron_id': device_id, 'nsx_id': nsx_device_id}) {'neutron_id': device_id, 'nsx_id': nsx_device_id})
except api_exc.NsxApiException: except api_exc.NsxApiException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
# In this case a 500 should be returned # In this case a 500 should be returned
LOG.exception(_("Removal of gateway device: %(neutron_id)s " LOG.exception(_LE("Removal of gateway device: %(neutron_id)s "
"failed on NSX backend (NSX id:%(nsx_id)s). " "failed on NSX backend (NSX id:%(nsx_id)s). "
"Neutron and NSX states have diverged."), "Neutron and NSX states have diverged."),
{'neutron_id': device_id, {'neutron_id': device_id,
@ -2339,9 +2342,9 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
# Reverting the DB change is not really worthwhile # Reverting the DB change is not really worthwhile
# for a mismatch between names. It's the rules that # for a mismatch between names. It's the rules that
# we care about. # we care about.
LOG.error(_('Error while updating security profile ' LOG.error(_LE('Error while updating security profile '
'%(uuid)s with name %(name)s: %(error)s.') '%(uuid)s with name %(name)s: %(error)s.'),
% {'uuid': secgroup_id, 'name': name, 'error': e}) {'uuid': secgroup_id, 'name': name, 'error': e})
return secgroup return secgroup
def delete_security_group(self, context, security_group_id): def delete_security_group(self, context, security_group_id):
@ -2371,15 +2374,16 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
except n_exc.NotFound: except n_exc.NotFound:
# The security profile was not found on the backend # The security profile was not found on the backend
# do not fail in this case. # do not fail in this case.
LOG.warning(_("The NSX security profile %(sec_profile_id)s, " LOG.warning(_LW("The NSX security profile %(sec_profile_id)s, "
"associated with the Neutron security group " "associated with the Neutron security group "
"%(sec_group_id)s was not found on the backend"), "%(sec_group_id)s was not found on the "
"backend"),
{'sec_profile_id': nsx_sec_profile_id, {'sec_profile_id': nsx_sec_profile_id,
'sec_group_id': security_group_id}) 'sec_group_id': security_group_id})
except api_exc.NsxApiException: except api_exc.NsxApiException:
# Raise and fail the operation, as there is a problem which # Raise and fail the operation, as there is a problem which
# prevented the sec group from being removed from the backend # prevented the sec group from being removed from the backend
LOG.exception(_("An exception occurred while removing the " LOG.exception(_LE("An exception occurred while removing the "
"NSX security profile %(sec_profile_id)s, " "NSX security profile %(sec_profile_id)s, "
"associated with Netron security group " "associated with Netron security group "
"%(sec_group_id)s"), "%(sec_group_id)s"),

View File

@ -29,6 +29,7 @@ from neutron.extensions import firewall as fw_ext
from neutron.extensions import l3 from neutron.extensions import l3
from neutron.extensions import routedserviceinsertion as rsi from neutron.extensions import routedserviceinsertion as rsi
from neutron.extensions import vpnaas as vpn_ext from neutron.extensions import vpnaas as vpn_ext
from neutron.i18n import _LE, _LW
from neutron.openstack.common import log as logging from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as service_constants from neutron.plugins.common import constants as service_constants
from neutron.plugins.vmware.api_client import exception as api_exc from neutron.plugins.vmware.api_client import exception as api_exc
@ -538,7 +539,7 @@ class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin,
try: try:
self.vcns_driver.delete_lswitch(lswitch_id) self.vcns_driver.delete_lswitch(lswitch_id)
except exceptions.ResourceNotFound: except exceptions.ResourceNotFound:
LOG.warning(_("Did not found lswitch %s in NSX"), lswitch_id) LOG.warning(_LW("Did not found lswitch %s in NSX"), lswitch_id)
# delete edge # delete edge
jobdata = { jobdata = {
@ -884,15 +885,14 @@ class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin,
except exceptions.VcnsApiException as e: except exceptions.VcnsApiException as e:
self._firewall_set_status( self._firewall_set_status(
context, fw['id'], service_constants.ERROR) context, fw['id'], service_constants.ERROR)
msg = (_("Failed to create firewall on vShield Edge " LOG.exception(_LE("Failed to create firewall on vShield Edge "
"bound on router %s") % router_id) "bound on router %s"), router_id)
LOG.exception(msg)
raise e raise e
except exceptions.VcnsBadRequest as e: except exceptions.VcnsBadRequest as e:
self._firewall_set_status( self._firewall_set_status(
context, fw['id'], service_constants.ERROR) context, fw['id'], service_constants.ERROR)
LOG.exception(_("Bad Firewall request Input")) LOG.exception(_LE("Bad Firewall request Input"))
raise e raise e
def _vcns_delete_firewall(self, context, router_id=None, **kwargs): def _vcns_delete_firewall(self, context, router_id=None, **kwargs):
@ -1113,7 +1113,7 @@ class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin,
context, loadbalancer_db.Vip, resource_id=vip_id) context, loadbalancer_db.Vip, resource_id=vip_id)
except Exception: except Exception:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to find the edge with " LOG.exception(_LE("Failed to find the edge with "
"vip_id: %s"), vip_id) "vip_id: %s"), vip_id)
return self._get_edge_id_by_vcns_edge_binding( return self._get_edge_id_by_vcns_edge_binding(
context, service_router_binding.router_id) context, service_router_binding.router_id)
@ -1184,8 +1184,8 @@ class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin,
context, edge_id, hm) context, edge_id, hm)
except Exception: except Exception:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to create healthmonitor " LOG.exception(_LE("Failed to create healthmonitor "
"associated with pool id: %s!") % pool_id) "associated with pool id: %s!"), pool_id)
for monitor_ide in pool.get('health_monitors'): for monitor_ide in pool.get('health_monitors'):
if monitor_ide == monitor_id: if monitor_ide == monitor_id:
break break
@ -1201,7 +1201,7 @@ class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin,
self.vcns_driver.create_pool(context, edge_id, pool, members) self.vcns_driver.create_pool(context, edge_id, pool, members)
except Exception: except Exception:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to create pool on vshield edge")) LOG.exception(_LE("Failed to create pool on vshield edge"))
self.vcns_driver.delete_pool( self.vcns_driver.delete_pool(
context, pool_id, edge_id) context, pool_id, edge_id)
for monitor_id in pool.get('health_monitors'): for monitor_id in pool.get('health_monitors'):
@ -1261,7 +1261,7 @@ class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin,
self.vcns_driver.create_vip(context, edge_id, v) self.vcns_driver.create_vip(context, edge_id, v)
except Exception: except Exception:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to create vip!")) LOG.exception(_LE("Failed to create vip!"))
self._delete_resource_router_id_binding( self._delete_resource_router_id_binding(
context, v['id'], loadbalancer_db.Vip) context, v['id'], loadbalancer_db.Vip)
super(NsxAdvancedPlugin, self).delete_vip(context, v['id']) super(NsxAdvancedPlugin, self).delete_vip(context, v['id'])
@ -1301,7 +1301,7 @@ class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin,
self.vcns_driver.update_vip(context, v, session_persistence_update) self.vcns_driver.update_vip(context, v, session_persistence_update)
except Exception: except Exception:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to update vip with id: %s!"), id) LOG.exception(_LE("Failed to update vip with id: %s!"), id)
self._resource_set_status(context, loadbalancer_db.Vip, self._resource_set_status(context, loadbalancer_db.Vip,
id, service_constants.ERROR, v) id, service_constants.ERROR, v)
@ -1318,7 +1318,7 @@ class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin,
self.vcns_driver.delete_vip(context, id) self.vcns_driver.delete_vip(context, id)
except Exception: except Exception:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to delete vip with id: %s!"), id) LOG.exception(_LE("Failed to delete vip with id: %s!"), id)
self._resource_set_status(context, loadbalancer_db.Vip, self._resource_set_status(context, loadbalancer_db.Vip,
id, service_constants.ERROR) id, service_constants.ERROR)
edge_id = self._get_edge_id_by_vip_id(context, id) edge_id = self._get_edge_id_by_vip_id(context, id)
@ -1374,7 +1374,7 @@ class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin,
self._vcns_update_pool(context, p) self._vcns_update_pool(context, p)
except Exception: except Exception:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to update pool with id: %s!"), id) LOG.exception(_LE("Failed to update pool with id: %s!"), id)
self._resource_set_status(context, loadbalancer_db.Pool, self._resource_set_status(context, loadbalancer_db.Pool,
p['id'], service_constants.ERROR, p) p['id'], service_constants.ERROR, p)
self._resource_set_status(context, loadbalancer_db.Pool, self._resource_set_status(context, loadbalancer_db.Pool,
@ -1396,7 +1396,7 @@ class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin,
self._vcns_update_pool(context, pool) self._vcns_update_pool(context, pool)
except Exception: except Exception:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to update pool with the member")) LOG.exception(_LE("Failed to update pool with the member"))
super(NsxAdvancedPlugin, self).delete_member(context, m['id']) super(NsxAdvancedPlugin, self).delete_member(context, m['id'])
self._resource_set_status(context, loadbalancer_db.Pool, self._resource_set_status(context, loadbalancer_db.Pool,
@ -1422,7 +1422,7 @@ class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin,
self._vcns_update_pool(context, old_pool) self._vcns_update_pool(context, old_pool)
except Exception: except Exception:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to update old pool " LOG.exception(_LE("Failed to update old pool "
"with the member")) "with the member"))
super(NsxAdvancedPlugin, self).delete_member( super(NsxAdvancedPlugin, self).delete_member(
context, m['id']) context, m['id'])
@ -1443,7 +1443,7 @@ class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin,
self._vcns_update_pool(context, pool) self._vcns_update_pool(context, pool)
except Exception: except Exception:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to update pool with the member")) LOG.exception(_LE("Failed to update pool with the member"))
super(NsxAdvancedPlugin, self).delete_member( super(NsxAdvancedPlugin, self).delete_member(
context, m['id']) context, m['id'])
@ -1466,7 +1466,7 @@ class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin,
self._vcns_update_pool(context, pool) self._vcns_update_pool(context, pool)
except Exception: except Exception:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to update pool with the member")) LOG.exception(_LE("Failed to update pool with the member"))
self._resource_set_status(context, loadbalancer_db.Pool, self._resource_set_status(context, loadbalancer_db.Pool,
pool_id, service_constants.ACTIVE) pool_id, service_constants.ACTIVE)
@ -1486,7 +1486,7 @@ class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin,
context, edge_id, old_hm, hm) context, edge_id, old_hm, hm)
except Exception: except Exception:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to update monitor " LOG.exception(_LE("Failed to update monitor "
"with id: %s!"), id) "with id: %s!"), id)
return hm return hm
@ -1525,7 +1525,7 @@ class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin,
self._vcns_update_pool(context, pool) self._vcns_update_pool(context, pool)
except Exception: except Exception:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to associate monitor with pool!")) LOG.exception(_LE("Failed to associate monitor with pool!"))
self._resource_set_status( self._resource_set_status(
context, loadbalancer_db.Pool, context, loadbalancer_db.Pool,
pool_id, service_constants.ERROR) pool_id, service_constants.ERROR)
@ -1556,7 +1556,7 @@ class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin,
except Exception: except Exception:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception( LOG.exception(
_("Failed to update pool with pool_monitor!")) _LE("Failed to update pool with pool_monitor!"))
self._resource_set_status( self._resource_set_status(
context, loadbalancer_db.Pool, context, loadbalancer_db.Pool,
pool_id, service_constants.ERROR) pool_id, service_constants.ERROR)
@ -1598,14 +1598,14 @@ class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin,
edge_id, sites, enabled=vpn_service.admin_state_up) edge_id, sites, enabled=vpn_service.admin_state_up)
except exceptions.VcnsBadRequest: except exceptions.VcnsBadRequest:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Bad or unsupported Input request!")) LOG.exception(_LE("Bad or unsupported Input request!"))
except exceptions.VcnsApiException: except exceptions.VcnsApiException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
msg = (_("Failed to update ipsec VPN configuration " LOG.exception(_LE("Failed to update ipsec VPN configuration "
"with vpnservice: %(vpnservice_id)s on vShield Edge: " "with vpnservice: %(vpnservice_id)s on "
"%(edge_id)s") % {'vpnservice_id': vpnservice_id, "vShield Edge: %(edge_id)s"),
{'vpnservice_id': vpnservice_id,
'edge_id': edge_id}) 'edge_id': edge_id})
LOG.exception(msg)
def create_vpnservice(self, context, vpnservice): def create_vpnservice(self, context, vpnservice):
LOG.debug("create_vpnservice() called") LOG.debug("create_vpnservice() called")
@ -1753,7 +1753,7 @@ class VcnsCallbacks(object):
context, neutron_router_id) context, neutron_router_id)
except l3.RouterNotFound: except l3.RouterNotFound:
# Router might have been deleted before deploy finished # Router might have been deleted before deploy finished
LOG.exception(_("Router %s not found"), lrouter['uuid']) LOG.exception(_LE("Router %s not found"), lrouter['uuid'])
if task.status == tasks_const.TaskStatus.COMPLETED: if task.status == tasks_const.TaskStatus.COMPLETED:
LOG.debug("Successfully deployed %(edge_id)s for " LOG.debug("Successfully deployed %(edge_id)s for "

View File

@ -15,6 +15,7 @@
from oslo.serialization import jsonutils from oslo.serialization import jsonutils
from oslo.utils import excutils from oslo.utils import excutils
from neutron.i18n import _LE
from neutron.openstack.common import log as logging from neutron.openstack.common import log as logging
from neutron.plugins.vmware.common import utils from neutron.plugins.vmware.common import utils
from neutron.plugins.vmware.vshield.common import constants as vcns_const from neutron.plugins.vmware.vshield.common import constants as vcns_const
@ -128,7 +129,7 @@ class EdgeApplianceDriver(object):
status_level = self._edge_status_to_level( status_level = self._edge_status_to_level(
response['edgeStatus']) response['edgeStatus'])
except exceptions.VcnsApiException as e: except exceptions.VcnsApiException as e:
LOG.exception(_("VCNS: Failed to get edge status:\n%s"), LOG.exception(_LE("VCNS: Failed to get edge status:\n%s"),
e.response) e.response)
status_level = vcns_const.RouterStatus.ROUTER_STATUS_ERROR status_level = vcns_const.RouterStatus.ROUTER_STATUS_ERROR
try: try:
@ -160,13 +161,13 @@ class EdgeApplianceDriver(object):
self.vcns.update_interface(edge_id, config) self.vcns.update_interface(edge_id, config)
except exceptions.VcnsApiException as e: except exceptions.VcnsApiException as e:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("VCNS: Failed to update vnic %(config)s:\n" LOG.exception(_LE("VCNS: Failed to update vnic %(config)s:\n"
"%(response)s"), { "%(response)s"), {
'config': config, 'config': config,
'response': e.response}) 'response': e.response})
except Exception: except Exception:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("VCNS: Failed to update vnic %d"), LOG.exception(_LE("VCNS: Failed to update vnic %d"),
config['index']) config['index'])
return constants.TaskStatus.COMPLETED return constants.TaskStatus.COMPLETED
@ -217,7 +218,7 @@ class EdgeApplianceDriver(object):
status = constants.TaskStatus.PENDING status = constants.TaskStatus.PENDING
except exceptions.VcnsApiException: except exceptions.VcnsApiException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("VCNS: deploy edge failed for router %s."), LOG.exception(_LE("VCNS: deploy edge failed for router %s."),
name) name)
return status return status
@ -236,21 +237,20 @@ class EdgeApplianceDriver(object):
status = constants.TaskStatus.ERROR status = constants.TaskStatus.ERROR
except exceptions.VcnsApiException: except exceptions.VcnsApiException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("VCNS: Edge %s status query failed."), edge_id) LOG.exception(_LE("VCNS: Edge %s status query failed."),
edge_id)
except Exception: except Exception:
retries = task.userdata.get('retries', 0) + 1 retries = task.userdata.get('retries', 0) + 1
if retries < 3: if retries < 3:
task.userdata['retries'] = retries task.userdata['retries'] = retries
msg = _("VCNS: Unable to retrieve edge %(edge_id)s status. " LOG.exception(_LE("VCNS: Unable to retrieve edge %(edge_id)s "
"Retry %(retries)d.") % { "status. Retry %(retries)d."),
'edge_id': edge_id, {'edge_id': edge_id,
'retries': retries} 'retries': retries})
LOG.exception(msg)
status = constants.TaskStatus.PENDING status = constants.TaskStatus.PENDING
else: else:
msg = _("VCNS: Unable to retrieve edge %s status. " LOG.exception(_LE("VCNS: Unable to retrieve edge %s status. "
"Abort.") % edge_id "Abort."), edge_id)
LOG.exception(msg)
status = constants.TaskStatus.ERROR status = constants.TaskStatus.ERROR
LOG.debug("VCNS: Edge %s status", edge_id) LOG.debug("VCNS: Edge %s status", edge_id)
return status return status
@ -259,7 +259,7 @@ class EdgeApplianceDriver(object):
router_name = task.userdata['router_name'] router_name = task.userdata['router_name']
edge_id = task.userdata.get('edge_id') edge_id = task.userdata.get('edge_id')
if task.status != constants.TaskStatus.COMPLETED: if task.status != constants.TaskStatus.COMPLETED:
LOG.error(_("VCNS: Failed to deploy edge %(edge_id)s " LOG.error(_LE("VCNS: Failed to deploy edge %(edge_id)s "
"for %(name)s, status %(status)d"), { "for %(name)s, status %(status)d"), {
'edge_id': edge_id, 'edge_id': edge_id,
'name': router_name, 'name': router_name,
@ -281,13 +281,12 @@ class EdgeApplianceDriver(object):
except exceptions.ResourceNotFound: except exceptions.ResourceNotFound:
pass pass
except exceptions.VcnsApiException as e: except exceptions.VcnsApiException as e:
msg = _("VCNS: Failed to delete %(edge_id)s:\n" LOG.exception(_LE("VCNS: Failed to delete %(edge_id)s:\n"
"%(response)s") % { "%(response)s"),
'edge_id': edge_id, 'response': e.response} {'edge_id': edge_id, 'response': e.response})
LOG.exception(msg)
status = constants.TaskStatus.ERROR status = constants.TaskStatus.ERROR
except Exception: except Exception:
LOG.exception(_("VCNS: Failed to delete %s"), edge_id) LOG.exception(_LE("VCNS: Failed to delete %s"), edge_id)
status = constants.TaskStatus.ERROR status = constants.TaskStatus.ERROR
return status return status
@ -297,7 +296,8 @@ class EdgeApplianceDriver(object):
return self.vcns.get_edges()[1] return self.vcns.get_edges()[1]
except exceptions.VcnsApiException as e: except exceptions.VcnsApiException as e:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("VCNS: Failed to get edges:\n%s"), e.response) LOG.exception(_LE("VCNS: Failed to get edges:\n%s"),
e.response)
def deploy_edge(self, router_id, name, internal_network, jobdata=None, def deploy_edge(self, router_id, name, internal_network, jobdata=None,
wait_for_exec=False, loadbalancer_enable=True): wait_for_exec=False, loadbalancer_enable=True):
@ -375,7 +375,7 @@ class EdgeApplianceDriver(object):
return self.vcns.get_nat_config(edge_id)[1] return self.vcns.get_nat_config(edge_id)[1]
except exceptions.VcnsApiException as e: except exceptions.VcnsApiException as e:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("VCNS: Failed to get nat config:\n%s"), LOG.exception(_LE("VCNS: Failed to get nat config:\n%s"),
e.response) e.response)
def _create_nat_rule(self, task): def _create_nat_rule(self, task):
@ -398,7 +398,7 @@ class EdgeApplianceDriver(object):
self.vcns.update_nat_config(edge_id, nat) self.vcns.update_nat_config(edge_id, nat)
status = constants.TaskStatus.COMPLETED status = constants.TaskStatus.COMPLETED
except exceptions.VcnsApiException as e: except exceptions.VcnsApiException as e:
LOG.exception(_("VCNS: Failed to create snat rule:\n%s"), LOG.exception(_LE("VCNS: Failed to create snat rule:\n%s"),
e.response) e.response)
status = constants.TaskStatus.ERROR status = constants.TaskStatus.ERROR
@ -440,7 +440,7 @@ class EdgeApplianceDriver(object):
try: try:
self.vcns.delete_nat_rule(edge_id, rule_id) self.vcns.delete_nat_rule(edge_id, rule_id)
except exceptions.VcnsApiException as e: except exceptions.VcnsApiException as e:
LOG.exception(_("VCNS: Failed to delete snat rule:\n" LOG.exception(_LE("VCNS: Failed to delete snat rule:\n"
"%s"), e.response) "%s"), e.response)
status = constants.TaskStatus.ERROR status = constants.TaskStatus.ERROR
@ -523,7 +523,7 @@ class EdgeApplianceDriver(object):
self.vcns.update_nat_config(edge_id, nat) self.vcns.update_nat_config(edge_id, nat)
status = constants.TaskStatus.COMPLETED status = constants.TaskStatus.COMPLETED
except exceptions.VcnsApiException as e: except exceptions.VcnsApiException as e:
LOG.exception(_("VCNS: Failed to create snat rule:\n%s"), LOG.exception(_LE("VCNS: Failed to create snat rule:\n%s"),
e.response) e.response)
status = constants.TaskStatus.ERROR status = constants.TaskStatus.ERROR
@ -593,7 +593,7 @@ class EdgeApplianceDriver(object):
self.vcns.update_routes(edge_id, request) self.vcns.update_routes(edge_id, request)
status = constants.TaskStatus.COMPLETED status = constants.TaskStatus.COMPLETED
except exceptions.VcnsApiException as e: except exceptions.VcnsApiException as e:
LOG.exception(_("VCNS: Failed to update routes:\n%s"), LOG.exception(_LE("VCNS: Failed to update routes:\n%s"),
e.response) e.response)
status = constants.TaskStatus.ERROR status = constants.TaskStatus.ERROR
@ -645,7 +645,7 @@ class EdgeApplianceDriver(object):
edge_id) edge_id)
except exceptions.VcnsApiException: except exceptions.VcnsApiException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to get service config")) LOG.exception(_LE("Failed to get service config"))
return response return response
def enable_service_loadbalancer(self, edge_id): def enable_service_loadbalancer(self, edge_id):
@ -657,5 +657,5 @@ class EdgeApplianceDriver(object):
self.vcns.enable_service_loadbalancer(edge_id, config) self.vcns.enable_service_loadbalancer(edge_id, config)
except exceptions.VcnsApiException: except exceptions.VcnsApiException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to enable loadbalancer " LOG.exception(_LE("Failed to enable loadbalancer "
"service config")) "service config"))

View File

@ -15,6 +15,7 @@
from oslo.utils import excutils from oslo.utils import excutils
from neutron.db import db_base_plugin_v2 from neutron.db import db_base_plugin_v2
from neutron.i18n import _LE
from neutron.openstack.common import log as logging from neutron.openstack.common import log as logging
from neutron.plugins.common import constants from neutron.plugins.common import constants
from neutron.plugins.vmware.dbexts import vcns_db from neutron.plugins.vmware.dbexts import vcns_db
@ -186,7 +187,7 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
return self.vcns.get_firewall(edge_id)[1] return self.vcns.get_firewall(edge_id)[1]
except vcns_exc.VcnsApiException: except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to get firewall with edge " LOG.exception(_LE("Failed to get firewall with edge "
"id: %s"), edge_id) "id: %s"), edge_id)
def _get_firewall_rule_next(self, context, edge_id, rule_vseid): def _get_firewall_rule_next(self, context, edge_id, rule_vseid):
@ -214,7 +215,7 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
edge_id, vcns_rule_id)[1] edge_id, vcns_rule_id)[1]
except vcns_exc.VcnsApiException: except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to get firewall rule: %(rule_id)s " LOG.exception(_LE("Failed to get firewall rule: %(rule_id)s "
"with edge_id: %(edge_id)s"), { "with edge_id: %(edge_id)s"), {
'rule_id': id, 'rule_id': id,
'edge_id': edge_id}) 'edge_id': edge_id})
@ -230,7 +231,7 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
self.vcns.update_firewall(edge_id, fw_req) self.vcns.update_firewall(edge_id, fw_req)
except vcns_exc.VcnsApiException: except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to update firewall " LOG.exception(_LE("Failed to update firewall "
"with edge_id: %s"), edge_id) "with edge_id: %s"), edge_id)
fw_res = self._get_firewall(context, edge_id) fw_res = self._get_firewall(context, edge_id)
vcns_db.cleanup_vcns_edge_firewallrule_binding( vcns_db.cleanup_vcns_edge_firewallrule_binding(
@ -242,7 +243,7 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
self.vcns.delete_firewall(edge_id) self.vcns.delete_firewall(edge_id)
except vcns_exc.VcnsApiException: except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to delete firewall " LOG.exception(_LE("Failed to delete firewall "
"with edge_id:%s"), edge_id) "with edge_id:%s"), edge_id)
vcns_db.cleanup_vcns_edge_firewallrule_binding( vcns_db.cleanup_vcns_edge_firewallrule_binding(
context.session, edge_id) context.session, edge_id)
@ -256,8 +257,8 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
self.vcns.update_firewall_rule(edge_id, vcns_rule_id, fwr_req) self.vcns.update_firewall_rule(edge_id, vcns_rule_id, fwr_req)
except vcns_exc.VcnsApiException: except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to update firewall rule: %(rule_id)s " LOG.exception(_LE("Failed to update firewall rule: "
"with edge_id: %(edge_id)s"), "%(rule_id)s with edge_id: %(edge_id)s"),
{'rule_id': id, {'rule_id': id,
'edge_id': edge_id}) 'edge_id': edge_id})
@ -269,8 +270,8 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
self.vcns.delete_firewall_rule(edge_id, vcns_rule_id) self.vcns.delete_firewall_rule(edge_id, vcns_rule_id)
except vcns_exc.VcnsApiException: except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to delete firewall rule: %(rule_id)s " LOG.exception(_LE("Failed to delete firewall rule: "
"with edge_id: %(edge_id)s"), "%(rule_id)s with edge_id: %(edge_id)s"),
{'rule_id': id, {'rule_id': id,
'edge_id': edge_id}) 'edge_id': edge_id})
vcns_db.delete_vcns_edge_firewallrule_binding( vcns_db.delete_vcns_edge_firewallrule_binding(
@ -286,7 +287,7 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
edge_id, ref_vcns_rule_id, fwr_req)[0] edge_id, ref_vcns_rule_id, fwr_req)[0]
except vcns_exc.VcnsApiException: except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to add firewall rule above: " LOG.exception(_LE("Failed to add firewall rule above: "
"%(rule_id)s with edge_id: %(edge_id)s"), "%(rule_id)s with edge_id: %(edge_id)s"),
{'rule_id': ref_vcns_rule_id, {'rule_id': ref_vcns_rule_id,
'edge_id': edge_id}) 'edge_id': edge_id})
@ -314,7 +315,7 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
edge_id, int(ref_vcns_rule_id), fwr_req)[0] edge_id, int(ref_vcns_rule_id), fwr_req)[0]
except vcns_exc.VcnsApiException: except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to add firewall rule above: " LOG.exception(_LE("Failed to add firewall rule above: "
"%(rule_id)s with edge_id: %(edge_id)s"), "%(rule_id)s with edge_id: %(edge_id)s"),
{'rule_id': ref_vcns_rule_id, {'rule_id': ref_vcns_rule_id,
'edge_id': edge_id}) 'edge_id': edge_id})
@ -325,7 +326,7 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
edge_id, fwr_req)[0] edge_id, fwr_req)[0]
except vcns_exc.VcnsApiException: except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to append a firewall rule" LOG.exception(_LE("Failed to append a firewall rule"
"with edge_id: %s"), edge_id) "with edge_id: %s"), edge_id)
objuri = header['location'] objuri = header['location']

View File

@ -14,6 +14,7 @@
from oslo.utils import excutils from oslo.utils import excutils
from neutron.i18n import _LE, _LW
from neutron.openstack.common import log as logging from neutron.openstack.common import log as logging
from neutron.plugins.vmware.vshield.common import ( from neutron.plugins.vmware.vshield.common import (
exceptions as vcns_exc) exceptions as vcns_exc)
@ -62,9 +63,9 @@ class EdgeIPsecVpnDriver():
ikepolicy['encryption_algorithm'] != ipsecpolicy[ ikepolicy['encryption_algorithm'] != ipsecpolicy[
'encryption_algorithm'] or 'encryption_algorithm'] or
ikepolicy['pfs'] != ipsecpolicy['pfs']): ikepolicy['pfs'] != ipsecpolicy['pfs']):
msg = _("IKEPolicy and IPsecPolicy should have consistent " LOG.warning(_LW(
"auth_algorithm, encryption_algorithm and pfs for VSE!") "IKEPolicy and IPsecPolicy should have consistent "
LOG.warning(msg) "auth_algorithm, encryption_algorithm and pfs for VSE!"))
# Check whether encryption_algorithm is allowed. # Check whether encryption_algorithm is allowed.
encryption_algorithm = ENCRYPTION_ALGORITHM_MAP.get( encryption_algorithm = ENCRYPTION_ALGORITHM_MAP.get(
@ -134,17 +135,18 @@ class EdgeIPsecVpnDriver():
self.vcns.update_ipsec_config(edge_id, ipsec_config) self.vcns.update_ipsec_config(edge_id, ipsec_config)
except vcns_exc.VcnsApiException: except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to update ipsec vpn configuration " LOG.exception(_LE("Failed to update ipsec vpn "
"with edge_id: %s"), edge_id) "configuration with edge_id: %s"),
edge_id)
def delete_ipsec_config(self, edge_id): def delete_ipsec_config(self, edge_id):
try: try:
self.vcns.delete_ipsec_config(edge_id) self.vcns.delete_ipsec_config(edge_id)
except vcns_exc.ResourceNotFound: except vcns_exc.ResourceNotFound:
LOG.warning(_("IPsec config not found on edge: %s"), edge_id) LOG.warning(_LW("IPsec config not found on edge: %s"), edge_id)
except vcns_exc.VcnsApiException: except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to delete ipsec vpn configuration " LOG.exception(_LE("Failed to delete ipsec vpn configuration "
"with edge_id: %s"), edge_id) "with edge_id: %s"), edge_id)
def get_ipsec_config(self, edge_id): def get_ipsec_config(self, edge_id):

View File

@ -14,6 +14,7 @@
from oslo.utils import excutils from oslo.utils import excutils
from neutron.i18n import _LE
from neutron.openstack.common import log as logging from neutron.openstack.common import log as logging
from neutron.plugins.vmware.dbexts import vcns_db from neutron.plugins.vmware.dbexts import vcns_db
from neutron.plugins.vmware.vshield.common import ( from neutron.plugins.vmware.vshield.common import (
@ -176,7 +177,7 @@ class EdgeLbDriver():
edge_id, app_profile) edge_id, app_profile)
except vcns_exc.VcnsApiException: except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to create app profile on edge: %s"), LOG.exception(_LE("Failed to create app profile on edge: %s"),
edge_id) edge_id)
objuri = header['location'] objuri = header['location']
app_profileid = objuri[objuri.rfind("/") + 1:] app_profileid = objuri[objuri.rfind("/") + 1:]
@ -187,7 +188,7 @@ class EdgeLbDriver():
edge_id, vip_new) edge_id, vip_new)
except vcns_exc.VcnsApiException: except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to create vip on vshield edge: %s"), LOG.exception(_LE("Failed to create vip on vshield edge: %s"),
edge_id) edge_id)
self.vcns.delete_app_profile(edge_id, app_profileid) self.vcns.delete_app_profile(edge_id, app_profileid)
objuri = header['location'] objuri = header['location']
@ -222,7 +223,7 @@ class EdgeLbDriver():
response = self.vcns.get_vip(edge_id, vip_vseid)[1] response = self.vcns.get_vip(edge_id, vip_vseid)[1]
except vcns_exc.VcnsApiException: except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to get vip on edge")) LOG.exception(_LE("Failed to get vip on edge"))
return self._restore_lb_vip(context, edge_id, response) return self._restore_lb_vip(context, edge_id, response)
def update_vip(self, context, vip, session_persistence_update=True): def update_vip(self, context, vip, session_persistence_update=True):
@ -239,15 +240,15 @@ class EdgeLbDriver():
edge_id, app_profileid, app_profile) edge_id, app_profileid, app_profile)
except vcns_exc.VcnsApiException: except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to update app profile on " LOG.exception(_LE("Failed to update app profile on "
"edge: %s") % edge_id) "edge: %s"), edge_id)
vip_new = self._convert_lb_vip(context, edge_id, vip, app_profileid) vip_new = self._convert_lb_vip(context, edge_id, vip, app_profileid)
try: try:
self.vcns.update_vip(edge_id, vip_vseid, vip_new) self.vcns.update_vip(edge_id, vip_vseid, vip_new)
except vcns_exc.VcnsApiException: except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to update vip on edge: %s") % edge_id) LOG.exception(_LE("Failed to update vip on edge: %s"), edge_id)
def delete_vip(self, context, id): def delete_vip(self, context, id):
vip_binding = self._get_vip_binding(context.session, id) vip_binding = self._get_vip_binding(context.session, id)
@ -258,18 +259,18 @@ class EdgeLbDriver():
try: try:
self.vcns.delete_vip(edge_id, vip_vseid) self.vcns.delete_vip(edge_id, vip_vseid)
except vcns_exc.ResourceNotFound: except vcns_exc.ResourceNotFound:
LOG.exception(_("vip not found on edge: %s") % edge_id) LOG.exception(_LE("vip not found on edge: %s"), edge_id)
except vcns_exc.VcnsApiException: except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to delete vip on edge: %s") % edge_id) LOG.exception(_LE("Failed to delete vip on edge: %s"), edge_id)
try: try:
self.vcns.delete_app_profile(edge_id, app_profileid) self.vcns.delete_app_profile(edge_id, app_profileid)
except vcns_exc.ResourceNotFound: except vcns_exc.ResourceNotFound:
LOG.exception(_("app profile not found on edge: %s") % edge_id) LOG.exception(_LE("app profile not found on edge: %s"), edge_id)
except vcns_exc.VcnsApiException: except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to delete app profile on edge: %s") % LOG.exception(_LE("Failed to delete app profile on edge: %s"),
edge_id) edge_id)
vcns_db.delete_vcns_edge_vip_binding(context.session, id) vcns_db.delete_vcns_edge_vip_binding(context.session, id)
@ -280,7 +281,7 @@ class EdgeLbDriver():
header = self.vcns.create_pool(edge_id, pool_new)[0] header = self.vcns.create_pool(edge_id, pool_new)[0]
except vcns_exc.VcnsApiException: except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to create pool")) LOG.exception(_LE("Failed to create pool"))
objuri = header['location'] objuri = header['location']
pool_vseid = objuri[objuri.rfind("/") + 1:] pool_vseid = objuri[objuri.rfind("/") + 1:]
@ -307,7 +308,7 @@ class EdgeLbDriver():
response = self.vcns.get_pool(edge_id, pool_vseid)[1] response = self.vcns.get_pool(edge_id, pool_vseid)[1]
except vcns_exc.VcnsApiException: except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to get pool on edge")) LOG.exception(_LE("Failed to get pool on edge"))
return self._restore_lb_pool(context, edge_id, response) return self._restore_lb_pool(context, edge_id, response)
def update_pool(self, context, edge_id, pool, members): def update_pool(self, context, edge_id, pool, members):
@ -319,7 +320,7 @@ class EdgeLbDriver():
self.vcns.update_pool(edge_id, pool_vseid, pool_new) self.vcns.update_pool(edge_id, pool_vseid, pool_new)
except vcns_exc.VcnsApiException: except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to update pool")) LOG.exception(_LE("Failed to update pool"))
def delete_pool(self, context, id, edge_id): def delete_pool(self, context, id, edge_id):
pool_binding = vcns_db.get_vcns_edge_pool_binding( pool_binding = vcns_db.get_vcns_edge_pool_binding(
@ -329,7 +330,7 @@ class EdgeLbDriver():
self.vcns.delete_pool(edge_id, pool_vseid) self.vcns.delete_pool(edge_id, pool_vseid)
except vcns_exc.VcnsApiException: except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to delete pool")) LOG.exception(_LE("Failed to delete pool"))
vcns_db.delete_vcns_edge_pool_binding( vcns_db.delete_vcns_edge_pool_binding(
context.session, id, edge_id) context.session, id, edge_id)
@ -339,7 +340,7 @@ class EdgeLbDriver():
header = self.vcns.create_health_monitor(edge_id, monitor_new)[0] header = self.vcns.create_health_monitor(edge_id, monitor_new)[0]
except vcns_exc.VcnsApiException: except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to create monitor on edge: %s"), LOG.exception(_LE("Failed to create monitor on edge: %s"),
edge_id) edge_id)
objuri = header['location'] objuri = header['location']
@ -367,7 +368,7 @@ class EdgeLbDriver():
response = self.vcns.get_health_monitor(edge_id, monitor_vseid)[1] response = self.vcns.get_health_monitor(edge_id, monitor_vseid)[1]
except vcns_exc.VcnsApiException as e: except vcns_exc.VcnsApiException as e:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to get monitor on edge: %s"), LOG.exception(_LE("Failed to get monitor on edge: %s"),
e.response) e.response)
return self._restore_lb_monitor(context, edge_id, response) return self._restore_lb_monitor(context, edge_id, response)
@ -384,7 +385,7 @@ class EdgeLbDriver():
edge_id, monitor_vseid, monitor_new) edge_id, monitor_vseid, monitor_new)
except vcns_exc.VcnsApiException: except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to update monitor on edge: %s"), LOG.exception(_LE("Failed to update monitor on edge: %s"),
edge_id) edge_id)
def delete_health_monitor(self, context, id, edge_id): def delete_health_monitor(self, context, id, edge_id):
@ -395,6 +396,6 @@ class EdgeLbDriver():
self.vcns.delete_health_monitor(edge_id, monitor_vseid) self.vcns.delete_health_monitor(edge_id, monitor_vseid)
except vcns_exc.VcnsApiException: except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to delete monitor")) LOG.exception(_LE("Failed to delete monitor"))
vcns_db.delete_vcns_edge_monitor_binding( vcns_db.delete_vcns_edge_monitor_binding(
context.session, id, edge_id) context.session, id, edge_id)

View File

@ -20,6 +20,7 @@ from eventlet import event
from eventlet import greenthread from eventlet import greenthread
from neutron.common import exceptions from neutron.common import exceptions
from neutron.i18n import _LE, _LI
from neutron.openstack.common import log as logging from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall from neutron.openstack.common import loopingcall
from neutron.plugins.vmware.vshield.tasks import constants from neutron.plugins.vmware.vshield.tasks import constants
@ -93,12 +94,11 @@ class Task():
try: try:
func(self) func(self)
except Exception: except Exception:
msg = _("Task %(task)s encountered exception in %(func)s " LOG.exception(_LE("Task %(task)s encountered exception in "
"at state %(state)s") % { "%(func)s at state %(state)s"),
'task': str(self), {'task': str(self),
'func': str(func), 'func': str(func),
'state': state} 'state': state})
LOG.exception(msg)
self._move_state(state) self._move_state(state)
@ -179,16 +179,14 @@ class TaskManager():
def _execute(self, task): def _execute(self, task):
"""Execute task.""" """Execute task."""
msg = _("Start task %s") % str(task) LOG.debug("Start task %s", str(task))
LOG.debug(msg)
task._start() task._start()
try: try:
status = task._execute_callback(task) status = task._execute_callback(task)
except Exception: except Exception:
msg = _("Task %(task)s encountered exception in %(cb)s") % { LOG.exception(_LE("Task %(task)s encountered exception in %(cb)s"),
'task': str(task), {'task': str(task),
'cb': str(task._execute_callback)} 'cb': str(task._execute_callback)})
LOG.exception(msg)
status = constants.TaskStatus.ERROR status = constants.TaskStatus.ERROR
LOG.debug("Task %(task)s return %(status)s", { LOG.debug("Task %(task)s return %(status)s", {
@ -205,10 +203,9 @@ class TaskManager():
try: try:
task._result_callback(task) task._result_callback(task)
except Exception: except Exception:
msg = _("Task %(task)s encountered exception in %(cb)s") % { LOG.exception(_LE("Task %(task)s encountered exception in %(cb)s"),
'task': str(task), {'task': str(task),
'cb': str(task._result_callback)} 'cb': str(task._result_callback)})
LOG.exception(msg)
LOG.debug("Task %(task)s return %(status)s", LOG.debug("Task %(task)s return %(status)s",
{'task': str(task), 'status': task.status}) {'task': str(task), 'status': task.status})
@ -228,10 +225,10 @@ class TaskManager():
try: try:
status = task._status_callback(task) status = task._status_callback(task)
except Exception: except Exception:
msg = _("Task %(task)s encountered exception in %(cb)s") % { LOG.exception(_LE("Task %(task)s encountered exception in "
'task': str(task), "%(cb)s"),
'cb': str(task._status_callback)} {'task': str(task),
LOG.exception(msg) 'cb': str(task._status_callback)})
status = constants.TaskStatus.ERROR status = constants.TaskStatus.ERROR
task._update_status(status) task._update_status(status)
if status != constants.TaskStatus.PENDING: if status != constants.TaskStatus.PENDING:
@ -293,7 +290,7 @@ class TaskManager():
if self._stopped: if self._stopped:
# Gracefully terminate this thread if the _stopped # Gracefully terminate this thread if the _stopped
# attribute was set to true # attribute was set to true
LOG.info(_("Stopping TaskManager")) LOG.info(_LI("Stopping TaskManager"))
break break
# get a task from queue, or timeout for periodic status check # get a task from queue, or timeout for periodic status check
@ -318,7 +315,7 @@ class TaskManager():
else: else:
self._enqueue(task) self._enqueue(task)
except Exception: except Exception:
LOG.exception(_("TaskManager terminating because " LOG.exception(_LE("TaskManager terminating because "
"of an exception")) "of an exception"))
break break
@ -340,7 +337,7 @@ class TaskManager():
if self._monitor_busy: if self._monitor_busy:
self._monitor.wait() self._monitor.wait()
self._abort() self._abort()
LOG.info(_("TaskManager terminated")) LOG.info(_LI("TaskManager terminated"))
def has_pending_task(self): def has_pending_task(self):
if self._tasks_queue or self._tasks or self._main_thread_exec_task: if self._tasks_queue or self._tasks or self._main_thread_exec_task:
@ -372,7 +369,7 @@ class TaskManager():
try: try:
self._check_pending_tasks() self._check_pending_tasks()
except Exception: except Exception:
LOG.exception(_("Exception in _check_pending_tasks")) LOG.exception(_LE("Exception in _check_pending_tasks"))
self._monitor_busy = False self._monitor_busy = False
if self._thread is not None: if self._thread is not None: