Drop log translations
Log messages are no longer being translated. This removes all use of the
_LE, _LI, and _LW translation markers to simplify logging and to avoid
confusion with new contributions.

See:
http://lists.openstack.org/pipermail/openstack-i18n/2016-November/002574.html
http://lists.openstack.org/pipermail/openstack-dev/2017-March/113365.html

Change-Id: I9d37ae28a3fecbe910e60dc7f22e229a7b65940c
parent 72af9f1b34
commit 6228a06399
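
The change is mechanical at each call site: the level-specific marker around
the log message is dropped, while user-facing exception messages keep the _()
translation function. One call site from the diff below, shown before and
after (an illustrative excerpt, not a complete file):

    # before: log message wrapped in a level-specific translation marker
    LOG.warning(_LW("Unable to find NSX port for Neutron port %s"),
                neutron_port_id)

    # after: plain string; lazy %-style argument passing is unchanged
    LOG.warning("Unable to find NSX port for Neutron port %s",
                neutron_port_id)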
tox.ini
@@ -97,7 +97,8 @@ commands = sphinx-build -W -b html doc/source doc/build/html
 # H904 Wrap long lines in parentheses instead of a backslash
 # TODO(dougwig) -- uncomment this to test for remaining linkages
 # N530 direct neutron imports not allowed
-ignore = E125,E126,E128,E129,E265,H305,H307,H404,H405,H904,N530
+# N531 translations hints
+ignore = E125,E126,E128,E129,E265,H305,H307,H404,H405,H904,N530,N531
 show-source = true
 builtins = _
 exclude = .venv,.git,.tox,dist,doc,*lib/python*,*egg,build,.ropeproject

@@ -27,16 +27,6 @@ _C = _translators.contextual_form
 # The plural translation function using the name "_P"
 _P = _translators.plural_form
-
-# Translators for log levels.
-#
-# The abbreviated names are meant to reflect the usual use of a short
-# name like '_'. The "L" is for "log" and the other letter comes from
-# the level.
-_LI = _translators.log_info
-_LW = _translators.log_warning
-_LE = _translators.log_error
-_LC = _translators.log_critical
 
 
 def get_available_languages():
     return oslo_i18n.get_available_languages(DOMAIN)

@@ -22,7 +22,6 @@ from oslo_log import log as logging
 import six
 from six.moves import http_client as httplib
 
-from vmware_nsx._i18n import _LE, _LI, _LW
 from vmware_nsx import api_client
 
 LOG = logging.getLogger(__name__)
@@ -101,15 +100,15 @@ class ApiClientBase(object):
         api_providers are configured.
         '''
         if not self._api_providers:
-            LOG.warning(_LW("[%d] no API providers currently available."), rid)
+            LOG.warning("[%d] no API providers currently available.", rid)
             return None
         if self._conn_pool.empty():
             LOG.debug("[%d] Waiting to acquire API client connection.", rid)
         priority, conn = self._conn_pool.get()
         now = time.time()
         if getattr(conn, 'last_used', now) < now - cfg.CONF.conn_idle_timeout:
-            LOG.info(_LI("[%(rid)d] Connection %(conn)s idle for %(sec)0.2f "
-                         "seconds; reconnecting."),
+            LOG.info("[%(rid)d] Connection %(conn)s idle for %(sec)0.2f "
+                     "seconds; reconnecting.",
                      {'rid': rid, 'conn': api_client.ctrl_conn_to_str(conn),
                       'sec': now - conn.last_used})
             conn = self._create_connection(*self._conn_params(conn))
@@ -149,8 +148,8 @@ class ApiClientBase(object):
         priority = http_conn.priority
         if bad_state:
             # Reconnect to provider.
-            LOG.warning(_LW("[%(rid)d] Connection returned in bad state, "
-                            "reconnecting to %(conn)s"),
+            LOG.warning("[%(rid)d] Connection returned in bad state, "
+                        "reconnecting to %(conn)s",
                         {'rid': rid,
                          'conn': api_client.ctrl_conn_to_str(http_conn)})
             http_conn = self._create_connection(*self._conn_params(http_conn))
@@ -181,7 +180,7 @@ class ApiClientBase(object):
 
         data = self._get_provider_data(conn)
         if data is None:
-            LOG.error(_LE("Login request for an invalid connection: '%s'"),
+            LOG.error("Login request for an invalid connection: '%s'",
                       api_client.ctrl_conn_to_str(conn))
             return
         provider_sem = data[0]

@@ -19,7 +19,6 @@
 from oslo_log import log as logging
 from six.moves import http_client as httplib
 
-from vmware_nsx._i18n import _LE
 from vmware_nsx.api_client import base
 from vmware_nsx.api_client import eventlet_client
 from vmware_nsx.api_client import eventlet_request
@@ -101,7 +100,7 @@ class NsxApiClient(eventlet_client.EventletApiClient):
 
         if response is None:
             # Timeout.
-            LOG.error(_LE('Request timed out: %(method)s to %(url)s'),
+            LOG.error('Request timed out: %(method)s to %(url)s',
                       {'method': method, 'url': url})
             raise exception.RequestTimeout()
 
@@ -112,15 +111,15 @@ class NsxApiClient(eventlet_client.EventletApiClient):
         # Fail-fast: Check for exception conditions and raise the
         # appropriate exceptions for known error codes.
         if status in exception.ERROR_MAPPINGS:
-            LOG.error(_LE("Received error code: %s"), status)
-            LOG.error(_LE("Server Error Message: %s"), response.body)
+            LOG.error("Received error code: %s", status)
+            LOG.error("Server Error Message: %s", response.body)
             exception.ERROR_MAPPINGS[status](response)
 
         # Continue processing for non-error condition.
         if (status != httplib.OK and status != httplib.CREATED
                 and status != httplib.NO_CONTENT):
-            LOG.error(_LE("%(method)s to %(url)s, unexpected response code: "
-                          "%(status)d (content = '%(body)s')"),
+            LOG.error("%(method)s to %(url)s, unexpected response code: "
+                      "%(status)d (content = '%(body)s')",
                       {'method': method, 'url': url,
                        'status': response.status, 'body': response.body})
             return None
@@ -136,6 +135,6 @@ class NsxApiClient(eventlet_client.EventletApiClient):
             # one of the server that responds.
             self.request('GET', '/ws.v1/control-cluster/node')
         if not self._version:
-            LOG.error(_LE('Unable to determine NSX version. '
-                          'Plugin might not work as expected.'))
+            LOG.error('Unable to determine NSX version. '
+                      'Plugin might not work as expected.')
         return self._version

@@ -21,7 +21,6 @@ import eventlet
 eventlet.monkey_patch()
 from oslo_log import log as logging
 
-from vmware_nsx._i18n import _LE
 from vmware_nsx.api_client import base
 from vmware_nsx.api_client import eventlet_request
 
@@ -143,7 +142,7 @@ class EventletApiClient(base.ApiClientBase):
         ret = g.join()
         if ret:
             if isinstance(ret, Exception):
-                LOG.error(_LE('Login error "%s"'), ret)
+                LOG.error('Login error "%s"', ret)
                 raise ret
 
             cookie = ret.getheader("Set-Cookie")

@@ -21,7 +21,7 @@ from oslo_serialization import jsonutils
 from six.moves import http_client as httplib
 from six.moves.urllib import parse
 
-from vmware_nsx._i18n import _, _LI, _LW
+from vmware_nsx._i18n import _
 from vmware_nsx.api_client import request
 
 LOG = logging.getLogger(__name__)
@@ -119,7 +119,7 @@ class EventletApiRequest(request.ApiRequest):
             with eventlet.timeout.Timeout(self._request_timeout, False):
                 return self._handle_request()
 
-            LOG.info(_LI('[%d] Request timeout.'), self._rid())
+            LOG.info('[%d] Request timeout.', self._rid())
             self._request_error = Exception(_('Request timeout'))
             return None
         else:
@@ -153,8 +153,8 @@ class EventletApiRequest(request.ApiRequest):
                     self._request_error = None
                     response = req
                 else:
-                    LOG.info(_LI('[%(rid)d] Error while handling request: '
-                                 '%(req)s'),
+                    LOG.info('[%(rid)d] Error while handling request: '
+                             '%(req)s',
                              {'rid': self._rid(), 'req': req})
                     self._request_error = req
                     response = None
@@ -210,7 +210,7 @@ class GetApiProvidersRequestEventlet(EventletApiRequest):
                     ret.append(_provider_from_listen_addr(addr))
             return ret
         except Exception as e:
-            LOG.warning(_LW("[%(rid)d] Failed to parse API provider: %(e)s"),
+            LOG.warning("[%(rid)d] Failed to parse API provider: %(e)s",
                         {'rid': self._rid(), 'e': e})
             # intentionally fall through
             return None

@@ -27,7 +27,7 @@ import six
 from six.moves import http_client as httplib
 import six.moves.urllib.parse as urlparse
 
-from vmware_nsx._i18n import _, _LI, _LW
+from vmware_nsx._i18n import _
 from vmware_nsx import api_client
 
 LOG = logging.getLogger(__name__)
@@ -122,8 +122,8 @@ class ApiRequest(object):
             conn.request(self._method, url, self._body, headers)
         except Exception as e:
             with excutils.save_and_reraise_exception():
-                LOG.warning(_LW("[%(rid)d] Exception issuing request: "
-                                "%(e)s"),
+                LOG.warning("[%(rid)d] Exception issuing request: "
+                            "%(e)s",
                             {'rid': self._rid(), 'e': e})
 
         response = conn.getresponse()
@@ -158,8 +158,8 @@ class ApiRequest(object):
                                        httplib.TEMPORARY_REDIRECT]:
                     break
                 elif redirects >= self._redirects:
-                    LOG.info(_LI("[%d] Maximum redirects exceeded, aborting "
-                                 "request"), self._rid())
+                    LOG.info("[%d] Maximum redirects exceeded, aborting "
+                             "request", self._rid())
                     break
                 redirects += 1
 
@@ -168,7 +168,7 @@ class ApiRequest(object):
                 if url is None:
                     response.status = httplib.INTERNAL_SERVER_ERROR
                     break
-                LOG.info(_LI("[%(rid)d] Redirecting request to: %(conn)s"),
+                LOG.info("[%(rid)d] Redirecting request to: %(conn)s",
                          {'rid': self._rid(),
                           'conn': self._request_str(conn, url)})
                 # yield here, just in case we are not out of the loop yet
@@ -181,8 +181,8 @@ class ApiRequest(object):
             # queue.
             if (response.status == httplib.INTERNAL_SERVER_ERROR and
                     response.status > httplib.NOT_IMPLEMENTED):
-                LOG.warning(_LW("[%(rid)d] Request '%(method)s %(url)s' "
-                                "received: %(status)s"),
+                LOG.warning("[%(rid)d] Request '%(method)s %(url)s' "
+                            "received: %(status)s",
                             {'rid': self._rid(), 'method': self._method,
                              'url': self._url, 'status': response.status})
                 raise Exception(_('Server error return: %s'), response.status)
@@ -197,8 +197,8 @@ class ApiRequest(object):
             msg = str(e)
             if response is None:
                 elapsed_time = time.time() - issued_time
-                LOG.warning(_LW("[%(rid)d] Failed request '%(conn)s': '%(msg)s' "
-                                "(%(elapsed)s seconds)"),
+                LOG.warning("[%(rid)d] Failed request '%(conn)s': '%(msg)s' "
+                            "(%(elapsed)s seconds)",
                             {'rid': self._rid(),
                              'conn': self._request_str(conn, url),
                              'msg': msg, 'elapsed': elapsed_time})
@@ -232,8 +232,8 @@ class ApiRequest(object):
                 url = value
                 break
         if not url:
-            LOG.warning(_LW("[%d] Received redirect status without location "
-                            "header field"), self._rid())
+            LOG.warning("[%d] Received redirect status without location "
+                        "header field", self._rid())
             return (conn, None)
         # Accept location with the following format:
         # 1. /path, redirect to same node
@@ -249,13 +249,13 @@ class ApiRequest(object):
                 url = result.path
                 return (conn, url)      # case 1
             else:
-                LOG.warning(_LW("[%(rid)d] Received invalid redirect "
-                                "location: '%(url)s'"),
+                LOG.warning("[%(rid)d] Received invalid redirect "
+                            "location: '%(url)s'",
                             {'rid': self._rid(), 'url': url})
                 return (conn, None)     # case 3
         elif result.scheme not in ["http", "https"] or not result.hostname:
-            LOG.warning(_LW("[%(rid)d] Received malformed redirect "
-                            "location: %(url)s"),
+            LOG.warning("[%(rid)d] Received malformed redirect "
+                        "location: %(url)s",
                         {'rid': self._rid(), 'url': url})
             return (conn, None)     # case 3
         # case 2, redirect location includes a scheme

@@ -17,9 +17,6 @@
 
 from oslo_log import log as logging
 
-from vmware_nsx._i18n import _LW
-
-
 LOG = logging.getLogger(__name__)
 
 
@@ -30,8 +27,8 @@ def find_version(headers):
             if header_name == 'server':
                 return Version(header_value.split('/')[1])
     except IndexError:
-        LOG.warning(_LW("Unable to fetch NSX version from response "
-                        "headers :%s"), headers)
+        LOG.warning("Unable to fetch NSX version from response "
+                    "headers :%s", headers)
 
 
 class Version(object):

@@ -18,7 +18,7 @@ from oslo_log import log as logging
 
 from neutron.db import l3_hamode_db
 
-from vmware_nsx._i18n import _, _LW
+from vmware_nsx._i18n import _
 from vmware_nsx.common import exceptions as nsx_exc
 from vmware_nsx.dvs import dvs_utils
 from vmware_nsx.extensions import routersize
@@ -843,9 +843,9 @@ def validate_nsxv_config_options():
         error = _("manager_uri, user, and password must be configured!")
         raise nsx_exc.NsxPluginException(err_msg=error)
     if cfg.CONF.nsxv.dvs_id is None:
-        LOG.warning(_LW("dvs_id must be configured to support VLANs!"))
+        LOG.warning("dvs_id must be configured to support VLANs!")
     if cfg.CONF.nsxv.vdn_scope_id is None:
-        LOG.warning(_LW("vdn_scope_id must be configured to support VXLANs!"))
+        LOG.warning("vdn_scope_id must be configured to support VXLANs!")
     if cfg.CONF.nsxv.use_dvs_features and not dvs_utils.dvs_is_enabled(
             dvs_id=cfg.CONF.nsxv.dvs_id):
         error = _("dvs host/vcenter credentials must be defined to use "

@@ -18,8 +18,6 @@ from oslo_log import log
 from oslo_utils import excutils
 import stevedore
 
-from vmware_nsx._i18n import _LE, _LI
-
 LOG = log.getLogger(__name__)
 
 
@@ -31,13 +29,13 @@ class ExtensionManager(stevedore.named.NamedExtensionManager):
         # the order in which the drivers are called.
         self.ordered_ext_drivers = []
 
-        LOG.info(_LI("Configured extension driver names: %s"),
+        LOG.info("Configured extension driver names: %s",
                  cfg.CONF.nsx_extension_drivers)
         super(ExtensionManager, self).__init__('vmware_nsx.extension_drivers',
                                                cfg.CONF.nsx_extension_drivers,
                                                invoke_on_load=True,
                                                name_order=True)
-        LOG.info(_LI("Loaded extension driver names: %s"), self.names())
+        LOG.info("Loaded extension driver names: %s", self.names())
         self._register_drivers()
 
     def _register_drivers(self):
@@ -48,13 +46,13 @@ class ExtensionManager(stevedore.named.NamedExtensionManager):
         """
         for ext in self:
            self.ordered_ext_drivers.append(ext)
-        LOG.info(_LI("Registered extension drivers: %s"),
+        LOG.info("Registered extension drivers: %s",
                  [driver.name for driver in self.ordered_ext_drivers])
 
     def initialize(self):
         # Initialize each driver in the list.
         for driver in self.ordered_ext_drivers:
-            LOG.info(_LI("Initializing extension driver '%s'"), driver.name)
+            LOG.info("Initializing extension driver '%s'", driver.name)
             driver.obj.initialize()
 
     def extension_aliases(self):
@@ -63,7 +61,7 @@ class ExtensionManager(stevedore.named.NamedExtensionManager):
             alias = driver.obj.extension_alias
             if alias:
                 exts.append(alias)
-                LOG.info(_LI("Got %(alias)s extension from driver '%(drv)s'"),
+                LOG.info("Got %(alias)s extension from driver '%(drv)s'",
                          {'alias': alias, 'drv': driver.name})
         return exts
 
@@ -74,8 +72,8 @@ class ExtensionManager(stevedore.named.NamedExtensionManager):
                 getattr(driver.obj, method_name)(plugin_context, data, result)
             except Exception:
                 with excutils.save_and_reraise_exception():
-                    LOG.info(_LI("Extension driver '%(name)s' failed in "
-                                 "%(method)s"),
+                    LOG.info("Extension driver '%(name)s' failed in "
+                             "%(method)s",
                              {'name': driver.name, 'method': method_name})
 
     def process_create_network(self, plugin_context, data, result):
@@ -113,8 +111,8 @@ class ExtensionManager(stevedore.named.NamedExtensionManager):
             try:
                 getattr(driver.obj, method_name)(session, base_model, result)
             except Exception:
-                LOG.error(_LE("Extension driver '%(name)s' failed in "
-                              "%(method)s"),
+                LOG.error("Extension driver '%(name)s' failed in "
+                          "%(method)s",
                           {'name': driver.name, 'method': method_name})
                 raise

@@ -21,7 +21,6 @@ from neutron_lib import exceptions as n_exc
 from oslo_log import log
 import six
 
-from vmware_nsx._i18n import _LW
 from vmware_nsx.api_client import client
 from vmware_nsx.api_client import exception as api_exc
 from vmware_nsx.common import utils as vmw_utils
@@ -68,8 +67,8 @@ def get_nsx_switch_ids(session, cluster, neutron_network_id):
     # more than once for each network in Neutron's lifetime
     nsx_switches = switchlib.get_lswitches(cluster, neutron_network_id)
     if not nsx_switches:
-        LOG.warning(_LW("Unable to find NSX switches for Neutron network "
-                        "%s"), neutron_network_id)
+        LOG.warning("Unable to find NSX switches for Neutron network "
+                    "%s", neutron_network_id)
         return
     nsx_switch_ids = []
     with session.begin(subtransactions=True):
@@ -115,7 +114,7 @@ def get_nsx_switch_and_port_id(session, cluster, neutron_port_id):
     # NOTE(salv-orlando): Not handling the case where more than one
     # port is found with the same neutron port tag
     if not nsx_ports:
-        LOG.warning(_LW("Unable to find NSX port for Neutron port %s"),
+        LOG.warning("Unable to find NSX port for Neutron port %s",
                     neutron_port_id)
         # This method is supposed to return a tuple
         return None, None
@@ -155,12 +154,12 @@ def get_nsx_security_group_id(session, cluster, neutron_id):
     # NOTE(salv-orlando): Not handling the case where more than one
     # security profile is found with the same neutron port tag
     if not nsx_sec_profiles:
-        LOG.warning(_LW("Unable to find NSX security profile for Neutron "
-                        "security group %s"), neutron_id)
+        LOG.warning("Unable to find NSX security profile for Neutron "
+                    "security group %s", neutron_id)
         return
     elif len(nsx_sec_profiles) > 1:
-        LOG.warning(_LW("Multiple NSX security profiles found for Neutron "
-                        "security group %s"), neutron_id)
+        LOG.warning("Multiple NSX security profiles found for Neutron "
+                    "security group %s", neutron_id)
     nsx_sec_profile = nsx_sec_profiles[0]
     nsx_id = nsx_sec_profile['uuid']
     with session.begin(subtransactions=True):
@@ -192,7 +191,7 @@ def get_nsx_router_id(session, cluster, neutron_router_id):
     # NOTE(salv-orlando): Not handling the case where more than one
     # port is found with the same neutron port tag
     if not nsx_routers:
-        LOG.warning(_LW("Unable to find NSX router for Neutron router %s"),
+        LOG.warning("Unable to find NSX router for Neutron router %s",
                     neutron_router_id)
         return
     nsx_router = nsx_routers[0]
@@ -249,12 +248,12 @@ def get_nsx_device_statuses(cluster, tenant_id):
     except api_exc.NsxApiException:
         # Do not make a NSX API exception fatal
         if tenant_id:
-            LOG.warning(_LW("Unable to retrieve operational status for "
-                            "gateway devices belonging to tenant: %s"),
+            LOG.warning("Unable to retrieve operational status for "
+                        "gateway devices belonging to tenant: %s",
                         tenant_id)
         else:
-            LOG.warning(_LW("Unable to retrieve operational status for "
-                            "gateway devices"))
+            LOG.warning("Unable to retrieve operational status for "
+                        "gateway devices")
 
 
 def _convert_bindings_to_nsx_transport_zones(bindings):

@@ -30,7 +30,7 @@ from neutron.db.models import l3 as l3_db
 from neutron.db import models_v2
 from neutron.extensions import l3
 
-from vmware_nsx._i18n import _, _LE, _LI, _LW
+from vmware_nsx._i18n import _
 from vmware_nsx.api_client import exception as api_exc
 from vmware_nsx.common import exceptions as nsx_exc
 from vmware_nsx.common import nsx_utils
@@ -273,8 +273,8 @@ class NsxSynchronizer(object):
             # TODO(salv-orlando): We should be catching
             # api_exc.ResourceNotFound here
             # The logical switch was not found
-            LOG.warning(_LW("Logical switch for neutron network %s not "
-                            "found on NSX."), neutron_network_data['id'])
+            LOG.warning("Logical switch for neutron network %s not "
+                        "found on NSX.", neutron_network_data['id'])
             lswitches = []
         else:
             for lswitch in lswitches:
@@ -360,8 +360,8 @@ class NsxSynchronizer(object):
             # NOTE(salv-orlando): We should be catching
             # api_exc.ResourceNotFound here
             # The logical router was not found
-            LOG.warning(_LW("Logical router for neutron router %s not "
-                            "found on NSX."), neutron_router_data['id'])
+            LOG.warning("Logical router for neutron router %s not "
+                        "found on NSX.", neutron_router_data['id'])
         if lrouter:
             # Update the cache
             self._nsx_cache.update_lrouter(lrouter)
@@ -410,8 +410,8 @@ class NsxSynchronizer(object):
                 neutron_router_mappings[neutron_router_id] = (
                     self._nsx_cache[lr_uuid])
             else:
-                LOG.warning(_LW("Unable to find Neutron router id for "
-                                "NSX logical router: %s"), lr_uuid)
+                LOG.warning("Unable to find Neutron router id for "
+                            "NSX logical router: %s", lr_uuid)
         # Fetch neutron routers from database
         filters = ({} if scan_missing else
                    {'id': neutron_router_mappings.keys()})
@@ -452,8 +452,8 @@ class NsxSynchronizer(object):
             # api_exc.ResourceNotFound here instead
             # of PortNotFoundOnNetwork when the id exists but
             # the logical switch port was not found
-            LOG.warning(_LW("Logical switch port for neutron port %s "
-                            "not found on NSX."), neutron_port_data['id'])
+            LOG.warning("Logical switch port for neutron port %s "
+                        "not found on NSX.", neutron_port_data['id'])
             lswitchport = None
         else:
             # If lswitchport is not None, update the cache.
@@ -545,11 +545,11 @@ class NsxSynchronizer(object):
         # be emitted.
         num_requests = page_size // (MAX_PAGE_SIZE + 1) + 1
         if num_requests > 1:
-            LOG.warning(_LW("Requested page size is %(cur_chunk_size)d. "
+            LOG.warning("Requested page size is %(cur_chunk_size)d. "
                         "It might be necessary to do %(num_requests)d "
                         "round-trips to NSX for fetching data. Please "
                         "tune sync parameters to ensure chunk size "
-                        "is less than %(max_page_size)d"),
+                        "is less than %(max_page_size)d",
                         {'cur_chunk_size': page_size,
                          'num_requests': num_requests,
                          'max_page_size': MAX_PAGE_SIZE})
@@ -578,8 +578,8 @@ class NsxSynchronizer(object):
     def _fetch_nsx_data_chunk(self, sp):
         base_chunk_size = sp.chunk_size
         chunk_size = base_chunk_size + sp.extra_chunk_size
-        LOG.info(_LI("Fetching up to %s resources "
-                     "from NSX backend"), chunk_size)
+        LOG.info("Fetching up to %s resources "
+                 "from NSX backend", chunk_size)
         fetched = ls_count = lr_count = lp_count = 0
         lswitches = lrouters = lswitchports = []
         if sp.ls_cursor or sp.ls_cursor == 'start':
@@ -618,7 +618,7 @@ class NsxSynchronizer(object):
         # Reset page cursor variables if necessary
         if sp.current_chunk == 0:
             sp.ls_cursor = sp.lr_cursor = sp.lp_cursor = 'start'
-        LOG.info(_LI("Running state synchronization task. Chunk: %s"),
+        LOG.info("Running state synchronization task. Chunk: %s",
                  sp.current_chunk)
         # Fetch chunk_size data from NSX
         try:
@@ -628,9 +628,9 @@ class NsxSynchronizer(object):
             sleep_interval = self._sync_backoff
             # Cap max back off to 64 seconds
             self._sync_backoff = min(self._sync_backoff * 2, 64)
-            LOG.exception(_LE("An error occurred while communicating with "
+            LOG.exception("An error occurred while communicating with "
                           "NSX backend. Will retry synchronization "
-                          "in %d seconds"), sleep_interval)
+                          "in %d seconds", sleep_interval)
             return sleep_interval
         LOG.debug("Time elapsed querying NSX: %s",
                   timeutils.utcnow() - start)
@@ -669,8 +669,8 @@ class NsxSynchronizer(object):
         self._synchronize_lswitchports(ctx, lp_uuids,
                                        scan_missing=scan_missing)
         # Increase chunk counter
-        LOG.info(_LI("Synchronization for chunk %(chunk_num)d of "
-                     "%(total_chunks)d performed"),
+        LOG.info("Synchronization for chunk %(chunk_num)d of "
+                 "%(total_chunks)d performed",
                  {'chunk_num': sp.current_chunk + 1,
                   'total_chunks': num_chunks})
         sp.current_chunk = (sp.current_chunk + 1) % num_chunks

@@ -28,8 +28,6 @@ from neutron_lib import constants
 from oslo_context import context as common_context
 from oslo_log import log
 
-from vmware_nsx._i18n import _LE
-
 LOG = log.getLogger(__name__)
 
 MAX_DISPLAY_NAME_LEN = 40
@@ -162,8 +160,8 @@ def read_file(path):
         with open(path) as file:
             return file.read().strip()
     except IOError as e:
-        LOG.error(_LE("Error while opening file "
-                      "%(path)s: %(err)s"), {'path': path, 'err': str(e)})
+        LOG.error("Error while opening file "
+                  "%(path)s: %(err)s", {'path': path, 'err': str(e)})
 
 
 def get_name_and_uuid(name, uuid, tag=None, maxlen=80):

@@ -34,7 +34,6 @@ from neutron_lib import constants as n_constants
 from neutron_lib.db import model_base
 from neutron_lib.utils import helpers
 
-from vmware_nsx._i18n import _LW
 from vmware_nsx.extensions import providersecuritygroup as provider_sg
 from vmware_nsx.extensions import securitygrouplogging as sg_logging
 from vmware_nsx.extensions import securitygrouppolicy as sg_policy
@@ -204,8 +203,8 @@ class ExtendedSecurityGroupPropertiesMixin(object):
             if self._is_provider_security_group(context, sg):
                 if only_warn:
                     LOG.warning(
-                        _LW("Ignored provider security group %(sg)s in "
-                            "security groups list for port %(id)s"),
+                        "Ignored provider security group %(sg)s in "
+                        "security groups list for port %(id)s",
                         {'sg': sg, 'id': port['id']})
                 else:
                     raise provider_sg.SecurityGroupIsProvider(id=sg)

@@ -27,7 +27,7 @@ from sqlalchemy import func
 from sqlalchemy.orm import exc
 from sqlalchemy.sql import expression as expr
 
-from vmware_nsx._i18n import _, _LE, _LW
+from vmware_nsx._i18n import _
 from vmware_nsx.common import exceptions as nsx_exc
 from vmware_nsx.common import nsxv_constants
 from vmware_nsx.db import db as nsx_db
@@ -70,10 +70,10 @@ def warn_on_binding_status_error(f, *args, **kwargs):
 
     for binding in bindings:
         if binding and binding['status'] == neutron_const.ERROR:
-            LOG.warning(_LW("Found NSXV router binding entry with status "
+            LOG.warning("Found NSXV router binding entry with status "
                         "%(status)s: router %(router)s, "
                         "edge %(edge)s, lswitch %(lswitch)s, "
-                        "status description: %(desc)s "),
+                        "status description: %(desc)s ",
                         {'status': binding['status'],
                          'router': binding['router_id'],
                          'edge': binding['edge_id'],
@@ -318,8 +318,8 @@ def create_edge_dhcp_static_binding(session, edge_id, mac_address, binding_id):
         return _create_edge_dhcp_static_binding(session, edge_id, mac_address,
                                                 binding_id)
     except db_exc.DBDuplicateEntry:
-        LOG.warning(_LW('Conflicting DHCP binding entry for '
-                        '%(edge_id)s:%(mac_address)s. Overwriting!'),
+        LOG.warning('Conflicting DHCP binding entry for '
+                    '%(edge_id)s:%(mac_address)s. Overwriting!',
                     {'edge_id': edge_id, 'mac_address': mac_address})
         delete_edge_dhcp_static_binding(session, edge_id, mac_address)
         return _create_edge_dhcp_static_binding(session, edge_id, mac_address,
@@ -373,8 +373,8 @@ def create_nsxv_internal_network(session, network_purpose,
             session.add(network)
     except db_exc.DBDuplicateEntry:
         with excutils.save_and_reraise_exception():
-            LOG.exception(_LE("Duplicate internal network for purpose "
-                              "%(p)s and availabiltiy zone %(az)s"),
+            LOG.exception("Duplicate internal network for purpose "
+                          "%(p)s and availabiltiy zone %(az)s",
                           {'p': network_purpose,
                            'az': availability_zone})
 
@@ -412,7 +412,7 @@ def create_nsxv_internal_edge(session, ext_ip_address, purpose, router_id):
             session.add(internal_edge)
     except db_exc.DBDuplicateEntry:
         with excutils.save_and_reraise_exception():
-            LOG.exception(_LE("Duplicate internal Edge IP %s"),
+            LOG.exception("Duplicate internal Edge IP %s",
                           ext_ip_address)

@@ -23,7 +23,6 @@ from neutron.db import models_v2
 from oslo_log import log
 from oslo_utils import uuidutils
 
-from vmware_nsx._i18n import _LI
 from vmware_nsx.db import nsx_models
 from vmware_nsx.extensions import qos_queue as qos
 
@@ -254,8 +253,8 @@ class QoSDbMixin(qos.QueuePluginBase):
             if dscp:
                 # must raise because a non-zero dscp was provided
                 raise qos.QueueInvalidMarking()
-            LOG.info(_LI("DSCP value (%s) will be ignored with 'trusted' "
-                         "marking"), dscp)
+            LOG.info("DSCP value (%s) will be ignored with 'trusted' "
+                     "marking", dscp)
         max = qos_queue.get('max')
         min = qos_queue.get('min')
         # Max can be None

@@ -21,7 +21,7 @@ from oslo_db import exception as db_exc
 from oslo_log import log as logging
 from oslo_utils import excutils
 
-from vmware_nsx._i18n import _, _LE, _LW
+from vmware_nsx._i18n import _
 from vmware_nsx.api_client import exception as api_exc
 from vmware_nsx.common import exceptions as p_exc
 from vmware_nsx.common import nsx_utils
@@ -69,14 +69,14 @@ class LsnManager(object):
             return lsn_api.lsn_for_network_get(self.cluster, network_id)
         except (n_exc.NotFound, api_exc.NsxApiException):
             if raise_on_err:
-                LOG.error(_LE('Unable to find Logical Service Node for '
-                              'network %s.'),
+                LOG.error('Unable to find Logical Service Node for '
+                          'network %s.',
                           network_id)
                 raise p_exc.LsnNotFound(entity='network',
                                         entity_id=network_id)
             else:
-                LOG.warning(_LW('Unable to find Logical Service Node for '
-                                'the requested network %s.'),
+                LOG.warning('Unable to find Logical Service Node for '
+                            'the requested network %s.',
                             network_id)
 
     def lsn_create(self, context, network_id):
@@ -92,7 +92,7 @@ class LsnManager(object):
         try:
             lsn_api.lsn_delete(self.cluster, lsn_id)
         except (n_exc.NotFound, api_exc.NsxApiException):
-            LOG.warning(_LW('Unable to delete Logical Service Node %s'),
+            LOG.warning('Unable to delete Logical Service Node %s',
                         lsn_id)
 
     def lsn_delete_by_network(self, context, network_id):
@@ -110,17 +110,17 @@ class LsnManager(object):
                     self.cluster, lsn_id, subnet_id)
             except (n_exc.NotFound, api_exc.NsxApiException):
                 if raise_on_err:
-                    LOG.error(_LE('Unable to find Logical Service Node Port '
+                    LOG.error('Unable to find Logical Service Node Port '
                               'for LSN %(lsn_id)s and subnet '
-                              '%(subnet_id)s'),
+                              '%(subnet_id)s',
                               {'lsn_id': lsn_id, 'subnet_id': subnet_id})
                     raise p_exc.LsnPortNotFound(lsn_id=lsn_id,
                                                 entity='subnet',
                                                 entity_id=subnet_id)
                 else:
-                    LOG.warning(_LW('Unable to find Logical Service Node Port '
+                    LOG.warning('Unable to find Logical Service Node Port '
                                 'for LSN %(lsn_id)s and subnet '
-                                '%(subnet_id)s'),
+                                '%(subnet_id)s',
                                 {'lsn_id': lsn_id, 'subnet_id': subnet_id})
                     return (lsn_id, None)
         else:
@@ -137,17 +137,17 @@ class LsnManager(object):
                     self.cluster, lsn_id, mac)
             except (n_exc.NotFound, api_exc.NsxApiException):
                 if raise_on_err:
-                    LOG.error(_LE('Unable to find Logical Service Node Port '
+                    LOG.error('Unable to find Logical Service Node Port '
                               'for LSN %(lsn_id)s and mac address '
-                              '%(mac)s'),
+                              '%(mac)s',
                               {'lsn_id': lsn_id, 'mac': mac})
                     raise p_exc.LsnPortNotFound(lsn_id=lsn_id,
                                                 entity='MAC',
                                                 entity_id=mac)
                 else:
-                    LOG.warning(_LW('Unable to find Logical Service Node '
+                    LOG.warning('Unable to find Logical Service Node '
                                 'Port for LSN %(lsn_id)s and mac address '
-                                '%(mac)s'),
+                                '%(mac)s',
                                 {'lsn_id': lsn_id, 'mac': mac})
                     return (lsn_id, None)
         else:
@@ -170,7 +170,7 @@ class LsnManager(object):
         try:
             lsn_api.lsn_port_delete(self.cluster, lsn_id, lsn_port_id)
         except (n_exc.NotFound, api_exc.NsxApiException):
-            LOG.warning(_LW('Unable to delete LSN Port %s'), lsn_port_id)
+            LOG.warning('Unable to delete LSN Port %s', lsn_port_id)
 
     def lsn_port_dispose(self, context, network_id, mac_address):
         """Delete a LSN port given the network and the mac address."""
@@ -187,12 +187,12 @@ class LsnManager(object):
                         self.cluster, network_id, lswitch_port_id)
                 except (n_exc.PortNotFoundOnNetwork,
                         api_exc.NsxApiException):
-                    LOG.warning(_LW("Metadata port not found while attempting "
-                                    "to delete it from network %s"),
+                    LOG.warning("Metadata port not found while attempting "
+                                "to delete it from network %s",
                                 network_id)
         else:
-            LOG.warning(_LW("Unable to find Logical Services Node "
-                            "Port with MAC %s"), mac_address)
+            LOG.warning("Unable to find Logical Services Node "
+                        "Port with MAC %s", mac_address)
 
     def lsn_port_dhcp_setup(
             self, context, network_id, port_id, port_data, subnet_config=None):
@@ -319,8 +319,8 @@ class LsnManager(object):
             if lsn_id and lsn_port_id:
                 hdlr(self.cluster, lsn_id, lsn_port_id, data)
         except (n_exc.NotFound, api_exc.NsxApiException):
-            LOG.error(_LE('Error while configuring LSN '
-                          'port %s'), lsn_port_id)
+            LOG.error('Error while configuring LSN '
+                      'port %s', lsn_port_id)
             raise p_exc.PortConfigurationError(
                 net_id=network_id, lsn_id=lsn_id, port_id=lsn_port_id)

@@ -20,7 +20,7 @@ from neutron_lib import constants as const
 from neutron_lib import exceptions as n_exc
 from oslo_log import log as logging
 
-from vmware_nsx._i18n import _, _LE
+from vmware_nsx._i18n import _
 from vmware_nsx.common import exceptions as p_exc
 from vmware_nsx.dhcp_meta import nsx
 from vmware_nsx.dhcp_meta import rpc
@@ -80,7 +80,7 @@ class DhcpMetadataBuilder(object):
             try:
                 self.plugin.delete_port(context, port['id'])
             except n_exc.PortNotFound:
-                LOG.error(_LE('Port %s is already gone'), port['id'])
+                LOG.error('Port %s is already gone', port['id'])
 
     def dhcp_allocate(self, context, network_id, subnet):
         """Allocate dhcp resources for the subnet."""

@@ -28,7 +28,7 @@ from neutron.common import rpc as n_rpc
 from neutron.common import topics
 from neutron.db import agents_db
 
-from vmware_nsx._i18n import _, _LW
+from vmware_nsx._i18n import _
 from vmware_nsx.common import config
 from vmware_nsx.common import exceptions as nsx_exc
 from vmware_nsx.dhcp_meta import combined
@@ -121,7 +121,7 @@ class DhcpMetadataAccess(object):
             # This becomes ineffective, as all new networks creations
             # are handled by Logical Services Nodes in NSX
             cfg.CONF.set_override('network_auto_schedule', False)
-            LOG.warning(_LW('network_auto_schedule has been disabled'))
+            LOG.warning('network_auto_schedule has been disabled')
             notifier = combined.DhcpAgentNotifyAPI(self.safe_reference,
                                                    lsn_manager)
             self.supported_extension_aliases.append(lsn.EXT_ALIAS)

@@ -25,7 +25,7 @@ from neutron.db import db_base_plugin_v2
 from neutron.db import l3_db
 from neutron.extensions import external_net
 
-from vmware_nsx._i18n import _, _LE, _LI
+from vmware_nsx._i18n import _
 from vmware_nsx.common import exceptions as p_exc
 from vmware_nsx.dhcp_meta import constants as d_const
 from vmware_nsx.nsxlib.mh import lsn as lsn_api
@@ -138,9 +138,9 @@ class DhcpAgentNotifyAPI(object):
             # down below as well as handle_port_metadata_access
             self.plugin.create_port(context, {'port': dhcp_port})
         except p_exc.PortConfigurationError as e:
-            LOG.error(_LE("Error while creating subnet %(cidr)s for "
+            LOG.error("Error while creating subnet %(cidr)s for "
                       "network %(network)s. Please, contact "
-                      "administrator"),
+                      "administrator",
                       {"cidr": subnet["cidr"],
                        "network": network_id})
             db_base_plugin_v2.NeutronDbPluginV2.delete_port(
@@ -207,12 +207,12 @@ def check_services_requirements(cluster):
 
 
 def handle_network_dhcp_access(plugin, context, network, action):
-    LOG.info(_LI("Performing DHCP %(action)s for resource: %(resource)s"),
+    LOG.info("Performing DHCP %(action)s for resource: %(resource)s",
              {"action": action, "resource": network})
     if action == 'create_network':
         network_id = network['id']
         if network.get(external_net.EXTERNAL):
-            LOG.info(_LI("Network %s is external: no LSN to create"),
+            LOG.info("Network %s is external: no LSN to create",
                      network_id)
             return
         plugin.lsn_manager.lsn_create(context, network_id)
@@ -221,12 +221,12 @@ def handle_network_dhcp_access(plugin, context, network, action):
         # is just the network id
         network_id = network
         plugin.lsn_manager.lsn_delete_by_network(context, network_id)
-        LOG.info(_LI("Logical Services Node for network "
-                     "%s configured successfully"), network_id)
+        LOG.info("Logical Services Node for network "
+                 "%s configured successfully", network_id)
 
 
 def handle_port_dhcp_access(plugin, context, port, action):
-    LOG.info(_LI("Performing DHCP %(action)s for resource: %(resource)s"),
+    LOG.info("Performing DHCP %(action)s for resource: %(resource)s",
             {"action": action, "resource": port})
     if port["device_owner"] == const.DEVICE_OWNER_DHCP:
         network_id = port["network_id"]
@@ -243,8 +243,8 @@ def handle_port_dhcp_access(plugin, context, port, action):
                 plugin.lsn_manager.lsn_port_dhcp_setup(
                     context, network_id, port['id'], subnet_data, subnet)
             except p_exc.PortConfigurationError:
-                LOG.error(_LE("Error while configuring DHCP for "
-                              "port %s"), port['id'])
+                LOG.error("Error while configuring DHCP for "
+                          "port %s", port['id'])
                 raise n_exc.NeutronException()
         elif action == "delete_port":
             plugin.lsn_manager.lsn_port_dispose(context, network_id,
@@ -254,8 +254,8 @@ def handle_port_dhcp_access(plugin, context, port, action):
             # do something only if there are IP's and dhcp is enabled
             subnet_id = port["fixed_ips"][0]['subnet_id']
             if not plugin.get_subnet(context, subnet_id)['enable_dhcp']:
-                LOG.info(_LI("DHCP is disabled for subnet %s: nothing "
-                             "to do"), subnet_id)
+                LOG.info("DHCP is disabled for subnet %s: nothing "
+                         "to do", subnet_id)
                 return
             host_data = {
                 "mac_address": port["mac_address"],
@@ -273,7 +273,7 @@ def handle_port_dhcp_access(plugin, context, port, action):
         if action == 'create_port':
             db_base_plugin_v2.NeutronDbPluginV2.delete_port(
                 plugin, context, port['id'])
-    LOG.info(_LI("DHCP for port %s configured successfully"), port['id'])
+    LOG.info("DHCP for port %s configured successfully", port['id'])
 
 
 def handle_port_metadata_access(plugin, context, port, is_delete=False):
@@ -281,7 +281,7 @@ def handle_port_metadata_access(plugin, context, port, is_delete=False):
         network_id = port["network_id"]
        network = plugin.get_network(context, network_id)
         if network[external_net.EXTERNAL]:
-            LOG.info(_LI("Network %s is external: nothing to do"),
+            LOG.info("Network %s is external: nothing to do",
                      network_id)
             return
         subnet_id = port["fixed_ips"][0]['subnet_id']
@@ -290,7 +290,7 @@ def handle_port_metadata_access(plugin, context, port, is_delete=False):
             "tenant_id": port["tenant_id"],
             "ip_address": port["fixed_ips"][0]['ip_address']
         }
-        LOG.info(_LI("Configuring metadata entry for port %s"), port)
+        LOG.info("Configuring metadata entry for port %s", port)
         if not is_delete:
             handler = plugin.lsn_manager.lsn_port_meta_host_add
         else:
@@ -302,13 +302,13 @@ def handle_port_metadata_access(plugin, context, port, is_delete=False):
         if not is_delete:
if not is_delete:
|
||||||
db_base_plugin_v2.NeutronDbPluginV2.delete_port(
|
db_base_plugin_v2.NeutronDbPluginV2.delete_port(
|
||||||
plugin, context, port['id'])
|
plugin, context, port['id'])
|
||||||
LOG.info(_LI("Metadata for port %s configured successfully"),
|
LOG.info("Metadata for port %s configured successfully",
|
||||||
port['id'])
|
port['id'])
|
||||||
|
|
||||||
|
|
||||||
def handle_router_metadata_access(plugin, context, router_id, interface=None):
|
def handle_router_metadata_access(plugin, context, router_id, interface=None):
|
||||||
LOG.info(_LI("Handle metadata access via router: %(r)s and "
|
LOG.info("Handle metadata access via router: %(r)s and "
|
||||||
"interface %(i)s"), {'r': router_id, 'i': interface})
|
"interface %(i)s", {'r': router_id, 'i': interface})
|
||||||
if interface:
|
if interface:
|
||||||
try:
|
try:
|
||||||
plugin.get_port(context, interface['port_id'])
|
plugin.get_port(context, interface['port_id'])
|
||||||
@ -324,4 +324,4 @@ def handle_router_metadata_access(plugin, context, router_id, interface=None):
|
|||||||
if is_enabled:
|
if is_enabled:
|
||||||
l3_db.L3_NAT_db_mixin.remove_router_interface(
|
l3_db.L3_NAT_db_mixin.remove_router_interface(
|
||||||
plugin, context, router_id, interface)
|
plugin, context, router_id, interface)
|
||||||
LOG.info(_LI("Metadata for router %s handled successfully"), router_id)
|
LOG.info("Metadata for router %s handled successfully", router_id)
|
||||||
|
@@ -24,7 +24,6 @@ from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
 from neutron.db import db_base_plugin_v2
 from neutron.db import models_v2

-from vmware_nsx._i18n import _LE, _LI, _LW
 from vmware_nsx.api_client import exception as api_exc
 from vmware_nsx.common import config
 from vmware_nsx.common import exceptions as nsx_exc
@@ -55,7 +54,7 @@ def handle_port_metadata_access(plugin, context, port, is_delete=False):
 if not port.get('fixed_ips'):
 # If port does not have an IP, the associated subnet is in
 # deleting state.
-LOG.info(_LI('Port %s has no IP due to subnet in deleting state'),
+LOG.info('Port %s has no IP due to subnet in deleting state',
 port['id'])
 return
 fixed_ip = port['fixed_ips'][0]
@@ -66,8 +65,8 @@ def handle_port_metadata_access(plugin, context, port, is_delete=False):
 # route. This is done via the enable_isolated_metadata
 # option if desired.
 if not subnet.get('gateway_ip'):
-LOG.info(_LI('Subnet %s does not have a gateway, the '
-'metadata route will not be created'),
+LOG.info('Subnet %s does not have a gateway, the '
+'metadata route will not be created',
 subnet['id'])
 return
 metadata_routes = [r for r in subnet.routes
@@ -99,8 +98,8 @@ def handle_router_metadata_access(plugin, context, router_id, interface=None):
 LOG.debug("Metadata access network is disabled")
 return
 if not cfg.CONF.allow_overlapping_ips:
-LOG.warning(_LW("Overlapping IPs must be enabled in order to setup "
-"the metadata access network"))
+LOG.warning("Overlapping IPs must be enabled in order to setup "
+"the metadata access network")
 return
 ctx_elevated = context.elevated()
 on_demand = getattr(plugin_cfg, 'metadata_on_demand', False)
@@ -138,8 +137,8 @@ def handle_router_metadata_access(plugin, context, router_id, interface=None):
 except (ntn_exc.NeutronException, nsx_exc.NsxPluginException,
 api_exc.NsxApiException):
 # Any exception here should be regarded as non-fatal
-LOG.exception(_LE("An error occurred while operating on the "
-"metadata access network for router:'%s'"),
+LOG.exception("An error occurred while operating on the "
+"metadata access network for router:'%s'",
 router_id)


@@ -17,7 +17,6 @@ from oslo_log import log as logging
 from oslo_utils import excutils
 from oslo_vmware import vim_util

-from vmware_nsx._i18n import _LE, _LI
 from vmware_nsx.common import exceptions as nsx_exc
 from vmware_nsx.dvs import dvs_utils

@@ -154,10 +153,10 @@ class DvsManager(VCManagerBase):
 except Exception:
 # NOTE(garyk): handle more specific exceptions
 with excutils.save_and_reraise_exception():
-LOG.exception(_LE('Failed to create port group for '
-'%(net_id)s with tag %(tag)s.'),
+LOG.exception('Failed to create port group for '
+'%(net_id)s with tag %(tag)s.',
 {'net_id': net_id, 'tag': vlan_tag})
-LOG.info(_LI("%(net_id)s with tag %(vlan_tag)s created on %(dvs)s."),
+LOG.info("%(net_id)s with tag %(vlan_tag)s created on %(dvs)s.",
 {'net_id': net_id,
 'vlan_tag': vlan_tag,
 'dvs': dvs_moref.value})
@@ -282,7 +281,7 @@ class DvsManager(VCManagerBase):
 self._session.vim,
 pg_moref, ['config'])
 if len(pg_spec) == 0 or len(pg_spec[0].propSet[0]) == 0:
-LOG.error(_LE('Failed to get object properties of %s'), pg_moref)
+LOG.error('Failed to get object properties of %s', pg_moref)
 raise nsx_exc.DvsNotFound(dvs=pg_moref)

 # Convert the extracted config to DVPortgroupConfigSpec
@@ -298,7 +297,7 @@ class DvsManager(VCManagerBase):
 try:
 self._session.wait_for_task(task)
 except Exception:
-LOG.error(_LE('Failed to reconfigure DVPortGroup %s'), pg_moref)
+LOG.error('Failed to reconfigure DVPortGroup %s', pg_moref)
 raise nsx_exc.DvsNotFound(dvs=pg_moref)

 # Update the dvs port groups config for a vxlan/vlan network
@@ -376,9 +375,9 @@ class DvsManager(VCManagerBase):
 except Exception:
 # NOTE(garyk): handle more specific exceptions
 with excutils.save_and_reraise_exception():
-LOG.exception(_LE('Failed to delete port group for %s.'),
+LOG.exception('Failed to delete port group for %s.',
 net_id)
-LOG.info(_LI("%(net_id)s delete from %(dvs)s."),
+LOG.info("%(net_id)s delete from %(dvs)s.",
 {'net_id': net_id,
 'dvs': dvs_moref.value})

@@ -514,11 +513,11 @@ class VMManager(VCManagerBase):
 spec=new_spec)
 try:
 self._session.wait_for_task(task)
-LOG.info(_LI("Updated VM moref %(moref)s spec - "
-"attached an interface"),
+LOG.info("Updated VM moref %(moref)s spec - "
+"attached an interface",
 {'moref': vm_moref.value})
 except Exception as e:
-LOG.error(_LE("Failed to reconfigure VM %(moref)s spec: %(e)s"),
+LOG.error("Failed to reconfigure VM %(moref)s spec: %(e)s",
 {'moref': vm_moref.value, 'e': e})

 def _build_vm_spec_detach(self, device):
@@ -541,10 +540,10 @@ class VMManager(VCManagerBase):
 spec=new_spec)
 try:
 self._session.wait_for_task(task)
-LOG.info(_LI("Updated VM %(moref)s spec - detached an interface"),
+LOG.info("Updated VM %(moref)s spec - detached an interface",
 {'moref': vm_moref.value})
 except Exception as e:
-LOG.error(_LE("Failed to reconfigure vm moref %(moref)s: %(e)s"),
+LOG.error("Failed to reconfigure vm moref %(moref)s: %(e)s",
 {'moref': vm_moref.value, 'e': e})

 def get_vm_interfaces_info(self, vm_moref):
@@ -566,7 +565,7 @@ class ClusterManager(VCManagerBase):
 session.vim, "ReconfigureComputeResource_Task",
 cluster, spec=config_spec, modify=True)
 except Exception as excep:
-LOG.exception(_LE('Failed to reconfigure cluster %s'), excep)
+LOG.exception('Failed to reconfigure cluster %s', excep)
 session.wait_for_task(reconfig_task)

 def _create_vm_group_spec(self, client_factory, name, vm_refs,
@@ -702,7 +701,7 @@ class ClusterManager(VCManagerBase):
 found = True
 break
 if not found:
-LOG.error(_LE("%s does not exist"), host_group_name)
+LOG.error("%s does not exist", host_group_name)
 raise exceptions.NotFound()
 update_cluster = False
 num_host_groups = len(host_group_names)
@@ -745,7 +744,7 @@ class ClusterManager(VCManagerBase):
 try:
 self._reconfigure_cluster(session, cluster, config_spec)
 except Exception as e:
-LOG.error(_LE('Unable to update cluster for host groups %s'),
+LOG.error('Unable to update cluster for host groups %s',
 e)

 def _delete_vm_group_spec(self, client_factory, name):

@@ -26,7 +26,6 @@ from neutron.objects import network as net_obj
 from neutron.objects import ports as port_obj
 from neutron.services.externaldns import driver

-from vmware_nsx._i18n import _LE, _LI
 from vmware_nsx.common import driver_api
 from vmware_nsx.plugins.nsx_v3 import availability_zones as nsx_az

@@ -272,7 +271,7 @@ class DNSExtensionDriver(driver_api.ExtensionDriver):
 class DNSExtensionDriverNSXv(DNSExtensionDriver):

 def initialize(self):
-LOG.info(_LI("DNSExtensionDriverNSXv initialization complete"))
+LOG.info("DNSExtensionDriverNSXv initialization complete")

 def external_dns_not_needed(self, context, network):
 dns_driver = _get_dns_driver()
@@ -287,7 +286,7 @@ class DNSExtensionDriverNSXv3(DNSExtensionDriver):

 def initialize(self):
 self._availability_zones = nsx_az.NsxV3AvailabilityZones()
-LOG.info(_LI("DNSExtensionDriverNSXv3 initialization complete"))
+LOG.info("DNSExtensionDriverNSXv3 initialization complete")

 def _get_network_az(self, network_id):
 context = n_context.get_admin_context()
@@ -337,7 +336,7 @@ def _get_dns_driver():
 cfg.CONF.external_dns_driver)
 return DNS_DRIVER
 except ImportError:
-LOG.exception(_LE("ImportError exception occurred while loading "
-"the external DNS service driver"))
+LOG.exception("ImportError exception occurred while loading "
+"the external DNS service driver")
 raise dns.ExternalDNSDriverNotFound(
 driver=cfg.CONF.external_dns_driver)

@@ -17,7 +17,7 @@ from oslo_config import cfg
 from oslo_log import log as logging
 import six

-from vmware_nsx._i18n import _, _LI
+from vmware_nsx._i18n import _
 from vmware_nsx.common import exceptions

 LOG = logging.getLogger(__name__)
@@ -60,8 +60,8 @@ class NSXCluster(object):
 raise exceptions.InvalidClusterConfiguration(
 invalid_attrs=self._required_attributes)
 if self._important_attributes:
-LOG.info(_LI("The following cluster attributes were "
-"not specified: %s'"), self._important_attributes)
+LOG.info("The following cluster attributes were "
+"not specified: %s'", self._important_attributes)
 # The API client will be explicitly created by users of this class
 self.api_client = None

@@ -17,7 +17,6 @@
 from oslo_log import log
 from oslo_serialization import jsonutils

-from vmware_nsx._i18n import _LE
 from vmware_nsx.api_client import exception as api_exc
 from vmware_nsx.common import exceptions as nsx_exc
 from vmware_nsx.common import utils
@@ -127,7 +126,7 @@ def _build_gateway_device_body(tenant_id, display_name, neutron_id,
 'ipsec_%s' % utils.NetworkTypes.GRE: "IPsecGRE"}
 nsx_connector_type = connector_type_mappings.get(connector_type)
 if connector_type and not nsx_connector_type:
-LOG.error(_LE("There is no NSX mapping for connector type %s"),
+LOG.error("There is no NSX mapping for connector type %s",
 connector_type)
 raise nsx_exc.InvalidTransportType(transport_type=connector_type)

@@ -20,7 +20,7 @@ from oslo_serialization import jsonutils
 from oslo_utils import excutils
 import six

-from vmware_nsx._i18n import _, _LE, _LI, _LW
+from vmware_nsx._i18n import _
 from vmware_nsx.api_client import exception as api_exc
 from vmware_nsx.common import exceptions as nsx_exc
 from vmware_nsx.common import utils
@@ -255,8 +255,8 @@ def update_explicit_routes_lrouter(cluster, router_id, routes):
 router_id, route)
 added_routes.append(uuid)
 except api_exc.NsxApiException:
-LOG.exception(_LE('Cannot update NSX routes %(routes)s for '
-'router %(router_id)s'),
+LOG.exception('Cannot update NSX routes %(routes)s for '
+'router %(router_id)s',
 {'routes': routes, 'router_id': router_id})
 # Roll back to keep NSX in consistent state
 with excutils.save_and_reraise_exception():
@@ -473,13 +473,13 @@ def _build_snat_rule_obj(min_src_ip, max_src_ip, nat_match_obj):


 def create_lrouter_nosnat_rule_v2(cluster, _router_id, _match_criteria=None):
-LOG.info(_LI("No SNAT rules cannot be applied as they are not available "
-"in this version of the NSX platform"))
+LOG.info("No SNAT rules cannot be applied as they are not available "
+"in this version of the NSX platform")


 def create_lrouter_nodnat_rule_v2(cluster, _router_id, _match_criteria=None):
-LOG.info(_LI("No DNAT rules cannot be applied as they are not available "
-"in this version of the NSX platform"))
+LOG.info("No DNAT rules cannot be applied as they are not available "
+"in this version of the NSX platform")


 def create_lrouter_snat_rule_v2(cluster, router_id,
@@ -579,9 +579,9 @@ def delete_nat_rules_by_match(cluster, router_id, rule_type,
 min_rules=min_num_expected,
 max_rules=max_num_expected)
 else:
-LOG.warning(_LW("Found %(actual_rule_num)d matching NAT rules, "
+LOG.warning("Found %(actual_rule_num)d matching NAT rules, "
 "which is not in the expected range "
-"(%(min_exp_rule_num)d,%(max_exp_rule_num)d)"),
+"(%(min_exp_rule_num)d,%(max_exp_rule_num)d)",
 {'actual_rule_num': num_rules_to_delete,
 'min_exp_rule_num': min_num_expected,
 'max_exp_rule_num': max_num_expected})

@@ -19,7 +19,6 @@ from oslo_log import log
 from oslo_serialization import jsonutils
 from oslo_utils import excutils

-from vmware_nsx._i18n import _LW
 from vmware_nsx.common import utils
 from vmware_nsx.nsxlib import mh as nsxlib

@@ -141,8 +140,8 @@ def delete_security_profile(cluster, spid):
 except exceptions.NotFound:
 with excutils.save_and_reraise_exception():
 # This is not necessarily an error condition
-LOG.warning(_LW("Unable to find security profile %s on NSX "
-"backend"), spid)
+LOG.warning("Unable to find security profile %s on NSX "
+"backend", spid)


 def summarize_security_group_rules(logical_port_rules):

@@ -20,7 +20,7 @@ from oslo_config import cfg
 from oslo_log import log
 from oslo_serialization import jsonutils

-from vmware_nsx._i18n import _, _LE, _LI, _LW
+from vmware_nsx._i18n import _
 from vmware_nsx.api_client import exception as api_exc
 from vmware_nsx.common import exceptions as nsx_exc
 from vmware_nsx.common import utils
@@ -148,7 +148,7 @@ def update_lswitch(cluster, lswitch_id, display_name,
 return nsxlib.do_request(HTTP_PUT, uri, jsonutils.dumps(lswitch_obj),
 cluster=cluster)
 except exception.NotFound as e:
-LOG.error(_LE("Network not found, Error: %s"), str(e))
+LOG.error("Network not found, Error: %s", str(e))
 raise exception.NetworkNotFound(net_id=lswitch_id)


@@ -163,7 +163,7 @@ def delete_networks(cluster, net_id, lswitch_ids):
 try:
 nsxlib.do_request(HTTP_DELETE, path, cluster=cluster)
 except exception.NotFound as e:
-LOG.error(_LE("Network not found, Error: %s"), str(e))
+LOG.error("Network not found, Error: %s", str(e))
 raise exception.NetworkNotFound(net_id=ls_id)


@@ -186,7 +186,7 @@ def delete_port(cluster, switch, port):
 try:
 nsxlib.do_request(HTTP_DELETE, uri, cluster=cluster)
 except exception.NotFound as e:
-LOG.error(_LE("Port or Network not found, Error: %s"), str(e))
+LOG.error("Port or Network not found, Error: %s", str(e))
 raise exception.PortNotFoundOnNetwork(
 net_id=switch, port_id=port)
 except api_exc.NsxApiException:
@@ -245,7 +245,7 @@ def get_ports(cluster, networks=None, devices=None, tenants=None):
 if not ports:
 ports = nsxlib.get_all_query_pages(lport_query_path, cluster)
 except exception.NotFound:
-LOG.warning(_LW("Lswitch %s not found in NSX"), lswitch)
+LOG.warning("Lswitch %s not found in NSX", lswitch)
 ports = None

 if ports:
@@ -279,16 +279,16 @@ def get_port_by_neutron_tag(cluster, lswitch_uuid, neutron_port_id):
 num_results = len(res["results"])
 if num_results >= 1:
 if num_results > 1:
-LOG.warning(_LW("Found '%(num_ports)d' ports with "
+LOG.warning("Found '%(num_ports)d' ports with "
 "q_port_id tag: '%(neutron_port_id)s'. "
-"Only 1 was expected."),
+"Only 1 was expected.",
 {'num_ports': num_results,
 'neutron_port_id': neutron_port_id})
 return res["results"][0]


 def get_port(cluster, network, port, relations=None):
-LOG.info(_LI("get_port() %(network)s %(port)s"),
+LOG.info("get_port() %(network)s %(port)s",
 {'network': network, 'port': port})
 uri = "/ws.v1/lswitch/" + network + "/lport/" + port + "?"
 if relations:
@@ -296,7 +296,7 @@ def get_port(cluster, network, port, relations=None):
 try:
 return nsxlib.do_request(HTTP_GET, uri, cluster=cluster)
 except exception.NotFound as e:
-LOG.error(_LE("Port or Network not found, Error: %s"), str(e))
+LOG.error("Port or Network not found, Error: %s", str(e))
 raise exception.PortNotFoundOnNetwork(
 port_id=port, net_id=network)

@@ -327,7 +327,7 @@ def update_port(cluster, lswitch_uuid, lport_uuid, neutron_port_id, tenant_id,
 {'result': result['uuid'], 'uuid': lswitch_uuid})
 return result
 except exception.NotFound as e:
-LOG.error(_LE("Port or Network not found, Error: %s"), str(e))
+LOG.error("Port or Network not found, Error: %s", str(e))
 raise exception.PortNotFoundOnNetwork(
 port_id=lport_uuid, net_id=lswitch_uuid)

@@ -369,7 +369,7 @@ def get_port_status(cluster, lswitch_id, port_id):
 "/ws.v1/lswitch/%s/lport/%s/status" %
 (lswitch_id, port_id), cluster=cluster)
 except exception.NotFound as e:
-LOG.error(_LE("Port not found, Error: %s"), str(e))
+LOG.error("Port not found, Error: %s", str(e))
 raise exception.PortNotFoundOnNetwork(
 port_id=port_id, net_id=lswitch_id)
 if r['link_status_up'] is True:

@@ -49,7 +49,7 @@ from neutron_lib.api import validators
 from neutron_lib import exceptions as n_exc

 import vmware_nsx
-from vmware_nsx._i18n import _, _LE, _LW
+from vmware_nsx._i18n import _
 from vmware_nsx.common import config # noqa
 from vmware_nsx.common import nsx_constants
 from vmware_nsx.common import utils as c_utils
@@ -162,9 +162,9 @@ class NsxDvsV2(addr_pair_db.AllowedAddressPairsMixin,
 def _dvs_create_network(self, context, network):
 net_data = network['network']
 if net_data['admin_state_up'] is False:
-LOG.warning(_LW("Network with admin_state_up=False are not yet "
+LOG.warning("Network with admin_state_up=False are not yet "
 "supported by this plugin. Ignoring setting for "
-"network %s"), net_data.get('name', '<unknown>'))
+"network %s", net_data.get('name', '<unknown>'))
 net_data['id'] = str(uuid.uuid4())
 vlan_tag = 0
 if net_data.get(pnet.NETWORK_TYPE) == c_utils.NetworkTypes.VLAN:
@@ -211,7 +211,7 @@ class NsxDvsV2(addr_pair_db.AllowedAddressPairsMixin,
 vlan_tag)
 except Exception:
 with excutils.save_and_reraise_exception():
-LOG.exception(_LE('Failed to create network'))
+LOG.exception('Failed to create network')
 if (net_data.get(pnet.NETWORK_TYPE) !=
 c_utils.NetworkTypes.PORTGROUP):
 self._dvs.delete_port_group(dvs_id)
@@ -278,7 +278,7 @@ class NsxDvsV2(addr_pair_db.AllowedAddressPairsMixin,
 bindings[0].binding_type != c_utils.NetworkTypes.PORTGROUP):
 self._dvs.delete_port_group(dvs_id)
 except Exception:
-LOG.exception(_LE('Unable to delete DVS port group %s'), id)
+LOG.exception('Unable to delete DVS port group %s', id)
 self.handle_network_dhcp_access(context, id, action='delete_network')

 def delete_network(self, context, id):

@@ -67,7 +67,7 @@ from neutron_lib.api.definitions import portbindings as pbin
 from neutron_lib.api.definitions import provider_net as pnet

 import vmware_nsx
-from vmware_nsx._i18n import _, _LE, _LI, _LW
+from vmware_nsx._i18n import _
 from vmware_nsx.api_client import exception as api_exc
 from vmware_nsx.common import config # noqa
 from vmware_nsx.common import exceptions as nsx_exc
@@ -150,8 +150,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
 router=l3_db_models.Router,
 floatingip=l3_db_models.FloatingIP)
 def __init__(self):
-LOG.warning(_LW("The NSX-MH plugin is deprecated and may be removed "
-"in the O or the P cycle"))
+LOG.warning("The NSX-MH plugin is deprecated and may be removed "
+"in the O or the P cycle")
 super(NsxPluginV2, self).__init__()
 # TODO(salv-orlando): Replace These dicts with
 # collections.defaultdict for better handling of default values
@@ -242,8 +242,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
 self._is_default_net_gw_in_sync = True
 except Exception:
 with excutils.save_and_reraise_exception():
-LOG.exception(_LE("Unable to process default l2 gw service: "
-"%s"),
+LOG.exception("Unable to process default l2 gw service: "
+"%s",
 def_l2_gw_uuid)

 def _build_ip_address_list(self, context, fixed_ips, subnet_ids=None):
@@ -280,8 +280,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
 port_data.get('mac_address'))
 LOG.debug("Created NSX router port:%s", lrouter_port['uuid'])
 except api_exc.NsxApiException:
-LOG.exception(_LE("Unable to create port on NSX logical router "
-"%s"),
+LOG.exception("Unable to create port on NSX logical router "
+"%s",
 nsx_router_id)
 raise nsx_exc.NsxPluginException(
 err_msg=_("Unable to create logical router port for neutron "
@@ -369,9 +369,9 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
 # Must remove NSX logical port
 routerlib.delete_router_lport(cluster, nsx_router_id,
 nsx_router_port_id)
-LOG.exception(_LE("Unable to plug attachment in NSX logical "
+LOG.exception("Unable to plug attachment in NSX logical "
 "router port %(r_port_id)s, associated with "
-"Neutron %(q_port_id)s"),
+"Neutron %(q_port_id)s",
 {'r_port_id': nsx_router_port_id,
 'q_port_id': port_data.get('id')})
 raise nsx_exc.NsxPluginException(
@@ -444,8 +444,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
 # rollback the neutron-nsx port mapping
 nsx_db.delete_neutron_nsx_port_mapping(context.session,
 port_id)
-LOG.exception(_LE("An exception occurred while creating the "
-"neutron port %s on the NSX plaform"), port_id)
+LOG.exception("An exception occurred while creating the "
+"neutron port %s on the NSX plaform", port_id)

 def _nsx_create_port(self, context, port_data):
 """Driver for creating a logical switch port on NSX platform."""
@@ -455,8 +455,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
 # However, in order to not break unit tests, we need to still create
 # the DB object and return success
 if self._network_is_external(context, port_data['network_id']):
-LOG.info(_LI("NSX plugin does not support regular VIF ports on "
-"external networks. Port %s will be down."),
+LOG.info("NSX plugin does not support regular VIF ports on "
+"external networks. Port %s will be down.",
 port_data['network_id'])
 # No need to actually update the DB state - the default is down
 return port_data
@@ -488,9 +488,9 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
 if (port_data['device_owner'] == constants.DEVICE_OWNER_DHCP and
 isinstance(e.inner_exception, sql_exc.IntegrityError)):
 LOG.warning(
-_LW("Concurrent network deletion detected; Back-end "
+"Concurrent network deletion detected; Back-end "
 "Port %(nsx_id)s creation to be rolled back for "
-"Neutron port: %(neutron_id)s"),
+"Neutron port: %(neutron_id)s",
 {'nsx_id': lport['uuid'],
 'neutron_id': port_data['id']})
 if selected_lswitch and lport:
@@ -507,8 +507,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
 # does not make sense. However we cannot raise as this would break
 # unit tests.
 if self._network_is_external(context, port_data['network_id']):
-LOG.info(_LI("NSX plugin does not support regular VIF ports on "
-"external networks. Port %s will be down."),
+LOG.info("NSX plugin does not support regular VIF ports on "
+"external networks. Port %s will be down.",
 port_data['network_id'])
 return
 nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id(
@@ -526,7 +526,7 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
 {'port_id': port_data['id'],
 'net_id': port_data['network_id']})
 except n_exc.NotFound:
-LOG.warning(_LW("Port %s not found in NSX"), port_data['id'])
+LOG.warning("Port %s not found in NSX", port_data['id'])

 def _nsx_delete_router_port(self, context, port_data):
 # Delete logical router port
@@ -536,9 +536,9 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
 context.session, self.cluster, port_data['id'])
 if not nsx_port_id:
 LOG.warning(
-_LW("Neutron port %(port_id)s not found on NSX backend. "
+"Neutron port %(port_id)s not found on NSX backend. "
 "Terminating delete operation. A dangling router port "
-"might have been left on router %(router_id)s"),
+"might have been left on router %(router_id)s",
 {'port_id': port_data['id'],
 'router_id': nsx_router_id})
 return
@@ -551,8 +551,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
 # Do not raise because the issue might as well be that the
 # router has already been deleted, so there would be nothing
 # to do here
-LOG.exception(_LE("Ignoring exception as this means the peer "
-"for port '%s' has already been deleted."),
+LOG.exception("Ignoring exception as this means the peer "
+"for port '%s' has already been deleted.",
 nsx_port_id)

 # Delete logical switch port
@@ -714,8 +714,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
 # However, in order to not break unit tests, we need to still create
 # the DB object and return success
 if self._network_is_external(context, port_data['network_id']):
-LOG.info(_LI("NSX plugin does not support regular VIF ports on "
-"external networks. Port %s will be down."),
+LOG.info("NSX plugin does not support regular VIF ports on "
+"external networks. Port %s will be down.",
 port_data['network_id'])
 # No need to actually update the DB state - the default is down
 return port_data
@@ -941,9 +941,9 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
 net_data[key] = None
 # FIXME(arosen) implement admin_state_up = False in NSX
 if net_data['admin_state_up'] is False:
-LOG.warning(_LW("Network with admin_state_up=False are not yet "
+LOG.warning("Network with admin_state_up=False are not yet "
 "supported by this plugin. Ignoring setting for "
-"network %s"), net_data.get('name', '<unknown>'))
+"network %s", net_data.get('name', '<unknown>'))
 transport_zone_config = self._convert_to_nsx_transport_zones(
 self.cluster, net_data)
 external = net_data.get(ext_net_extn.EXTERNAL)
@@ -1026,8 +1026,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
 try:
 switchlib.delete_networks(self.cluster, id, lswitch_ids)
 except n_exc.NotFound:
-LOG.warning(_LW("The following logical switches were not "
-"found on the NSX backend:%s"), lswitch_ids)
+LOG.warning("The following logical switches were not "
+"found on the NSX backend:%s", lswitch_ids)
 self.handle_network_dhcp_access(context, id, action='delete_network')
 LOG.debug("Delete network complete for network: %s", id)

@@ -1086,17 +1086,17 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
 nsx_switch_ids = nsx_utils.get_nsx_switch_ids(
 context.session, self.cluster, id)
 if not nsx_switch_ids or len(nsx_switch_ids) < 1:
-LOG.warning(_LW("Unable to find NSX mappings for neutron "
-"network:%s"), id)
+LOG.warning("Unable to find NSX mappings for neutron "
+"network:%s", id)
 try:
 switchlib.update_lswitch(self.cluster,
 nsx_switch_ids[0],
 network['network']['name'])
 except api_exc.NsxApiException as e:
-LOG.warning(_LW("Logical switch update on NSX backend failed. "
+LOG.warning("Logical switch update on NSX backend failed. "
 "Neutron network id:%(net_id)s; "
 "NSX lswitch id:%(lswitch_id)s;"
-"Error:%(error)s"),
+"Error:%(error)s",
 {'net_id': id, 'lswitch_id': nsx_switch_ids[0],
 'error': e})

@@ -1179,8 +1179,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
 LOG.debug("port created on NSX backend for tenant "
 "%(tenant_id)s: (%(id)s)", port_data)
 except n_exc.NotFound:
-LOG.warning(_LW("Logical switch for network %s was not "
-"found in NSX."), port_data['network_id'])
+LOG.warning("Logical switch for network %s was not "
+"found in NSX.", port_data['network_id'])
 # Put port in error on neutron DB
 with context.session.begin(subtransactions=True):
 port = self._get_port(context, neutron_port_id)
@@ -1190,8 +1190,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
 except Exception:
 # Port must be removed from neutron DB
 with excutils.save_and_reraise_exception():
-LOG.error(_LE("Unable to create port or set port "
-"attachment in NSX."))
+LOG.error("Unable to create port or set port "
+"attachment in NSX.")
 with context.session.begin(subtransactions=True):
 self.ipam.delete_port(context, neutron_port_id)
 # this extra lookup is necessary to get the
@@ -1322,7 +1322,7 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
 # FIXME(arosen) improve exception handling.
 except Exception:
 ret_port['status'] = constants.PORT_STATUS_ERROR
-LOG.exception(_LE("Unable to update port id: %s."),
+LOG.exception("Unable to update port id: %s.",
 nsx_port_id)

 # If nsx_port_id is not in database or in nsx put in error state.
@@ -1419,10 +1419,10 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
 "L3GatewayAttachment",
 self.cluster.default_l3_gw_service_uuid)
 except nsx_exc.NsxPluginException:
-LOG.exception(_LE("Unable to create L3GW port on logical router "
+LOG.exception("Unable to create L3GW port on logical router "
 "%(router_uuid)s. Verify Default Layer-3 "
 "Gateway service %(def_l3_gw_svc)s id is "
-"correct"),
+"correct",
 {'router_uuid': lrouter['uuid'],
 'def_l3_gw_svc':
 self.cluster.default_l3_gw_service_uuid})
@@ -1513,13 +1513,13 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
 # As setting gateway failed, the router must be deleted
 # in order to ensure atomicity
 router_id = router_db['id']
-LOG.warning(_LW("Failed to set gateway info for router "
-"being created:%s - removing router"),
+LOG.warning("Failed to set gateway info for router "
+"being created:%s - removing router",
 router_id)
 self.delete_router(context, router_id)
-LOG.info(_LI("Create router failed while setting external "
+LOG.info("Create router failed while setting external "
 "gateway. Router:%s has been removed from "
-"DB and backend"),
+"DB and backend",
 router_id)
 return self._make_router_dict(router_db)

@@ -1624,8 +1624,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
 self._delete_lrouter(context, router_id, nsx_router_id)
 except n_exc.NotFound:
 # This is not a fatal error, but needs to be logged
-LOG.warning(_LW("Logical router '%s' not found "
-"on NSX Platform"), router_id)
+LOG.warning("Logical router '%s' not found "
+"on NSX Platform", router_id)
 except api_exc.NsxApiException:
 raise nsx_exc.NsxPluginException(
 err_msg=(_("Unable to delete logical router '%s' "
@@ -1635,8 +1635,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
 # not exist anymore in the backend. This is not a fatal condition,
 # but will result in an exception is "None" is passed to
 # _delete_lrouter
-LOG.warning(_LW("No mapping found for logical router '%s' "
-"on NSX Platform"), router_id)
+LOG.warning("No mapping found for logical router '%s' "
+"on NSX Platform", router_id)

 # Remove the NSX mapping first in order to ensure a mapping to
 # a non-existent NSX router is not left in the DB in case of
@@ -1646,10 +1646,10 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
 context.session, router_id)
 except db_exc.DBError as d_exc:
 # Do not make this error fatal
-LOG.warning(_LW("Unable to remove NSX mapping for Neutron router "
+LOG.warning("Unable to remove NSX mapping for Neutron router "
 "%(router_id)s because of the following exception:"
-"%(d_exc)s"), {'router_id': router_id,
+"%(d_exc)s", {'router_id': router_id,
 'd_exc': str(d_exc)})
 # Perform the actual delete on the Neutron DB
 super(NsxPluginV2, self).delete_router(context, router_id)

@@ -1805,8 +1805,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
 raise_on_len_mismatch=False,
 destination_ip_addresses=subnet['cidr'])
 except n_exc.NotFound:
-LOG.error(_LE("Logical router resource %s not found "
-"on NSX platform"), router_id)
+LOG.error("Logical router resource %s not found "
+"on NSX platform", router_id)
 except api_exc.NsxApiException:
 raise nsx_exc.NsxPluginException(
 err_msg=(_("Unable to update logical router"
@@ -1841,13 +1841,13 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,

 except api_exc.NsxApiException:
 with excutils.save_and_reraise_exception():
-LOG.exception(_LE("An error occurred while removing NAT rules "
-"on the NSX platform for floating ip:%s"),
+LOG.exception("An error occurred while removing NAT rules "
+"on the NSX platform for floating ip:%s",
 floating_ip_address)
 except nsx_exc.NatRuleMismatch:
 # Do not surface to the user
-LOG.warning(_LW("An incorrect number of matching NAT rules "
-"was found on the NSX platform"))
+LOG.warning("An incorrect number of matching NAT rules "
+"was found on the NSX platform")

 def _remove_floatingip_address(self, context, fip_db):
 # Remove floating IP address from logical router port
@@ -1960,10 +1960,10 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
 self.cluster, nsx_router_id, nsx_gw_port_id,
 ips_to_add=nsx_floating_ips, ips_to_remove=[])
 except api_exc.NsxApiException:
-LOG.exception(_LE("An error occurred while creating NAT "
+LOG.exception("An error occurred while creating NAT "
 "rules on the NSX platform for floating "
 "ip:%(floating_ip)s mapped to "
-"internal ip:%(internal_ip)s"),
+"internal ip:%(internal_ip)s",
 {'floating_ip': floating_ip,
 'internal_ip': internal_ip})
 msg = _("Failed to update NAT rules for floatingip update")
@@ -2020,7 +2020,7 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
 LOG.debug("The port '%s' is not associated with floating IPs",
 port_id)
 except n_exc.NotFound:
-LOG.warning(_LW("Nat rules not found in nsx for port: %s"), id)
+LOG.warning("Nat rules not found in nsx for port: %s", id)

 # NOTE(ihrachys): L3 agent notifications don't make sense for
 # NSX VMWare plugin since there is no L3 agent in such setup, so
@@ -2085,8 +2085,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
 except api_exc.ResourceNotFound:
 # Do not cause a 500 to be returned to the user if
 # the corresponding NSX resource does not exist
-LOG.exception(_LE("Unable to remove gateway service from "
-"NSX plaform - the resource was not found"))
+LOG.exception("Unable to remove gateway service from "
+"NSX plaform - the resource was not found")

 def get_network_gateway(self, context, id, fields=None):
 # Ensure the default gateway in the config file is in sync with the db
@@ -2114,8 +2114,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
 except api_exc.NsxApiException:
 # Consider backend failures as non-fatal, but still warn
 # because this might indicate something dodgy is going on
-LOG.warning(_LW("Unable to update name on NSX backend "
-"for network gateway: %s"), id)
+LOG.warning("Unable to update name on NSX backend "
+"for network gateway: %s", id)
 return super(NsxPluginV2, self).update_network_gateway(
 context, id, network_gateway)

@@ -2141,8 +2141,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,

 def _rollback_gw_device(self, context, device_id, gw_data=None,
 new_status=None, is_create=False):
-LOG.error(_LE("Rolling back database changes for gateway device %s "
-"because of an error in the NSX backend"), device_id)
+LOG.error("Rolling back database changes for gateway device %s "
+"because of an error in the NSX backend", device_id)
 with context.session.begin(subtransactions=True):
 query = self._model_query(
 context, nsx_models.NetworkGatewayDevice).filter(
|
||||||
@ -2321,16 +2321,16 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
|
|||||||
try:
|
try:
|
||||||
l2gwlib.delete_gateway_device(self.cluster, nsx_device_id)
|
l2gwlib.delete_gateway_device(self.cluster, nsx_device_id)
|
||||||
except n_exc.NotFound:
|
except n_exc.NotFound:
|
||||||
LOG.warning(_LW("Removal of gateway device: %(neutron_id)s failed "
|
LOG.warning("Removal of gateway device: %(neutron_id)s failed "
|
||||||
"on NSX backend (NSX id:%(nsx_id)s) because the "
|
"on NSX backend (NSX id:%(nsx_id)s) because the "
|
||||||
"NSX resource was not found"),
|
"NSX resource was not found",
|
||||||
{'neutron_id': device_id, 'nsx_id': nsx_device_id})
|
{'neutron_id': device_id, 'nsx_id': nsx_device_id})
|
||||||
except api_exc.NsxApiException:
|
except api_exc.NsxApiException:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
# In this case a 500 should be returned
|
# In this case a 500 should be returned
|
||||||
LOG.exception(_LE("Removal of gateway device: %(neutron_id)s "
|
LOG.exception("Removal of gateway device: %(neutron_id)s "
|
||||||
"failed on NSX backend (NSX id:%(nsx_id)s). "
|
"failed on NSX backend (NSX id:%(nsx_id)s). "
|
||||||
"Neutron and NSX states have diverged."),
|
"Neutron and NSX states have diverged.",
|
||||||
{'neutron_id': device_id,
|
{'neutron_id': device_id,
|
||||||
'nsx_id': nsx_device_id})
|
'nsx_id': nsx_device_id})
|
||||||
|
|
||||||
@ -2376,8 +2376,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
|
|||||||
# Reverting the DB change is not really worthwhile
|
# Reverting the DB change is not really worthwhile
|
||||||
# for a mismatch between names. It's the rules that
|
# for a mismatch between names. It's the rules that
|
||||||
# we care about.
|
# we care about.
|
||||||
LOG.error(_LE('Error while updating security profile '
|
LOG.error('Error while updating security profile '
|
||||||
'%(uuid)s with name %(name)s: %(error)s.'),
|
'%(uuid)s with name %(name)s: %(error)s.',
|
||||||
{'uuid': secgroup_id, 'name': name, 'error': e})
|
{'uuid': secgroup_id, 'name': name, 'error': e})
|
||||||
return secgroup
|
return secgroup
|
||||||
|
|
||||||
@ -2408,19 +2408,19 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
|
|||||||
except n_exc.NotFound:
|
except n_exc.NotFound:
|
||||||
# The security profile was not found on the backend
|
# The security profile was not found on the backend
|
||||||
# do not fail in this case.
|
# do not fail in this case.
|
||||||
LOG.warning(_LW("The NSX security profile %(sec_profile_id)s, "
|
LOG.warning("The NSX security profile %(sec_profile_id)s, "
|
||||||
"associated with the Neutron security group "
|
"associated with the Neutron security group "
|
||||||
"%(sec_group_id)s was not found on the "
|
"%(sec_group_id)s was not found on the "
|
||||||
"backend"),
|
"backend",
|
||||||
{'sec_profile_id': nsx_sec_profile_id,
|
{'sec_profile_id': nsx_sec_profile_id,
|
||||||
'sec_group_id': security_group_id})
|
'sec_group_id': security_group_id})
|
||||||
except api_exc.NsxApiException:
|
except api_exc.NsxApiException:
|
||||||
# Raise and fail the operation, as there is a problem which
|
# Raise and fail the operation, as there is a problem which
|
||||||
# prevented the sec group from being removed from the backend
|
# prevented the sec group from being removed from the backend
|
||||||
LOG.exception(_LE("An exception occurred while removing the "
|
LOG.exception("An exception occurred while removing the "
|
||||||
"NSX security profile %(sec_profile_id)s, "
|
"NSX security profile %(sec_profile_id)s, "
|
||||||
"associated with Netron security group "
|
"associated with Netron security group "
|
||||||
"%(sec_group_id)s"),
|
"%(sec_group_id)s",
|
||||||
{'sec_profile_id': nsx_sec_profile_id,
|
{'sec_profile_id': nsx_sec_profile_id,
|
||||||
'sec_group_id': security_group_id})
|
'sec_group_id': security_group_id})
|
||||||
raise nsx_exc.NsxPluginException(
|
raise nsx_exc.NsxPluginException(
|
||||||
|
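Every hunk in this change applies the same mechanical rewrite: the `_LE`/`_LI`/`_LW` wrapper around the log format string is dropped, while the format arguments stay as separate logger parameters, so interpolation remains deferred until the record is actually emitted. A minimal before/after sketch of the pattern (the message and value are illustrative, not taken from a specific hunk):

    import logging

    LOG = logging.getLogger(__name__)
    router_id = "1234"  # illustrative value

    # Before: the marker wrapped the format string for translation.
    # LOG.warning(_LW("Unable to remove NSX mapping for router %s"), router_id)

    # After: a plain format string; the argument is still passed to the
    # logger instead of being pre-formatted with %, so the string is only
    # interpolated when the record is actually emitted.
    LOG.warning("Unable to remove NSX mapping for router %s", router_id)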
@@ -20,7 +20,6 @@ from neutron.db import l3_db
 from neutron_lib import constants
 from neutron_lib import exceptions as n_exc
 
-from vmware_nsx._i18n import _LE, _LW
 from vmware_nsx.common import locking
 from vmware_nsx.db import nsxv_db
 from vmware_nsx.plugins.nsx_v.drivers import (
@@ -127,8 +126,8 @@ class RouterDistributedDriver(router_driver.RouterBaseDriver):
 
         # This should address cases where the binding remains due to breakage
         if nsxv_db.get_vdr_dhcp_binding_by_vdr(context.session, router_id):
-            LOG.warning(_LW("DHCP bind wasn't cleaned for router %s. "
-                            "Cleaning up entry"), router_id)
+            LOG.warning("DHCP bind wasn't cleaned for router %s. "
+                        "Cleaning up entry", router_id)
             nsxv_db.delete_vdr_dhcp_binding(context.session, router_id)
 
     def update_routes(self, context, router_id, newnexthop,
@@ -476,7 +475,7 @@ class RouterDistributedDriver(router_driver.RouterBaseDriver):
                 self.edge_manager.remove_network_from_dhcp_edge(
                     context, network_id, vdr_dhcp_binding['dhcp_edge_id'])
             else:
-                LOG.error(_LE('VDR DHCP binding is missing for %s'),
+                LOG.error('VDR DHCP binding is missing for %s',
                           router_id)
 
             # Reattach to regular DHCP Edge
@@ -24,7 +24,6 @@ from neutron_lib import exceptions as n_exc
 from oslo_log import log as logging
 
 from vmware_nsx._i18n import _
-from vmware_nsx._i18n import _LE
 from vmware_nsx.common import exceptions as nsx_exc
 from vmware_nsx.common import locking
 from vmware_nsx.db import nsxv_db
@@ -398,7 +397,7 @@ class RouterSharedDriver(router_driver.RouterBaseDriver):
                         router_dict['gateway'] = (
                             gwp['fixed_ips'][0]['subnet_id'])
                     except IndexError:
-                        LOG.error(_LE("Skipping GW port %s with no fixed IP"),
+                        LOG.error("Skipping GW port %s with no fixed IP",
                                   gwp['id'])
             subnet_ids = [p['fixed_ips'][0]['subnet_id'] for p in
                           intf_ports if p['device_id'] == r['id']]
@@ -18,7 +18,7 @@ import stevedore
 
 from oslo_log import log
 
-from vmware_nsx._i18n import _, _LE, _LI
+from vmware_nsx._i18n import _
 from vmware_nsx.common import exceptions as nsx_exc
 
 LOG = log.getLogger(__name__)
@@ -32,14 +32,14 @@ class RouterTypeManager(stevedore.named.NamedExtensionManager):
         # Mapping from type name to DriverManager
         self.drivers = {}
 
-        LOG.info(_LI("Configured router type driver names: %s"),
+        LOG.info("Configured router type driver names: %s",
                  ROUTER_TYPE_DRIVERS)
         super(RouterTypeManager, self).__init__(
             'vmware_nsx.neutron.nsxv.router_type_drivers',
             ROUTER_TYPE_DRIVERS,
             invoke_on_load=True,
             invoke_args=(plugin,))
-        LOG.info(_LI("Loaded type driver names: %s"), self.names())
+        LOG.info("Loaded type driver names: %s", self.names())
         self._register_types()
         self._check_tenant_router_types(cfg.CONF.nsxv.tenant_router_types)
 
@@ -47,15 +47,15 @@ class RouterTypeManager(stevedore.named.NamedExtensionManager):
         for ext in self:
             router_type = ext.obj.get_type()
             if router_type in self.drivers:
-                LOG.error(_LE("Type driver '%(new_driver)s' ignored because "
+                LOG.error("Type driver '%(new_driver)s' ignored because "
                           "type driver '%(old_driver)s' is already "
-                          "registered for type '%(type)s'"),
+                          "registered for type '%(type)s'",
                           {'new_driver': ext.name,
                            'old_driver': self.drivers[router_type].name,
                            'type': router_type})
             else:
                 self.drivers[router_type] = ext
-        LOG.info(_LI("Registered types: %s"), self.drivers.keys())
+        LOG.info("Registered types: %s", self.drivers.keys())
 
     def _check_tenant_router_types(self, types):
         self.tenant_router_types = []
@@ -67,7 +67,7 @@ class RouterTypeManager(stevedore.named.NamedExtensionManager):
                       "Service terminated!") % router_type
                 LOG.error(msg)
                 raise SystemExit(msg)
-        LOG.info(_LI("Tenant router_types: %s"), self.tenant_router_types)
+        LOG.info("Tenant router_types: %s", self.tenant_router_types)
 
     def get_tenant_router_driver(self, context, router_type):
         driver = self.drivers.get(router_type)
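Note that only the log-level markers are removed; the plain `_()` marker for user-facing text is kept, which is why the import lines shrink to `from vmware_nsx._i18n import _` rather than disappearing. Exception messages surfaced through the API stay translatable while log messages become plain strings. A short sketch of the resulting convention, reusing names from the hunks above (the helper function and message text are illustrative):

    from oslo_log import log as logging

    from vmware_nsx._i18n import _
    from vmware_nsx.common import exceptions as nsx_exc

    LOG = logging.getLogger(__name__)

    def fail_router_update(router_id):
        # Operator-facing log line: plain, untranslated format string.
        LOG.error("Unable to update logical router %s", router_id)
        # User-facing API error: still wrapped in _() for translation.
        raise nsx_exc.NsxPluginException(
            err_msg=_("Unable to update logical router %s") % router_id)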
@@ -23,7 +23,7 @@ from neutron_lib import context as neutron_context
 from oslo_config import cfg
 from oslo_log import log as logging
 
-from vmware_nsx._i18n import _, _LE, _LW
+from vmware_nsx._i18n import _
 from vmware_nsx.common import exceptions as nsxv_exc
 from vmware_nsx.common import locking
 from vmware_nsx.common import nsxv_constants
@@ -192,8 +192,8 @@ class NsxVMetadataProxyHandler(object):
                 self.nsxv_plugin.delete_network(context,
                                                 internal_net)
 
-            LOG.exception(_LE("Exception %s while creating internal "
-                              "network for metadata service"), e)
+            LOG.exception("Exception %s while creating internal "
+                          "network for metadata service", e)
             return
 
         # Update the new network_id in DB
@@ -217,7 +217,7 @@ class NsxVMetadataProxyHandler(object):
         if ports:
             return ports[0]['fixed_ips'][0]['ip_address']
         else:
-            LOG.error(_LE("No port found for metadata for %s"), rtr_id)
+            LOG.error("No port found for metadata for %s", rtr_id)
 
     def _get_edge_rtr_id_by_ext_ip(self, context, edge_ip):
         rtr_list = nsxv_db.get_nsxv_internal_edge(
@@ -299,8 +299,8 @@ class NsxVMetadataProxyHandler(object):
             edge_id = self._get_edge_id_by_rtr_id(context, rtr_id)
         if not rtr_id or not edge_id:
             # log this error and return without the ip, but don't fail
-            LOG.error(_LE("Failed find edge for router %(rtr_id)s with ip "
-                          "%(rtr_ext_ip)s"),
+            LOG.error("Failed find edge for router %(rtr_id)s with ip "
+                      "%(rtr_ext_ip)s",
                       {'rtr_id': rtr_id, 'rtr_ext_ip': rtr_ext_ip})
             return
 
@@ -310,8 +310,8 @@ class NsxVMetadataProxyHandler(object):
             h, routes = self.nsxv_plugin.nsx_v.vcns.get_routes(edge_id)
         except exceptions.ResourceNotFound as e:
             # log this error and return without the ip, but don't fail
-            LOG.error(_LE("Failed to get routes for metadata proxy edge "
-                          "%(edge)s: %(err)s"),
+            LOG.error("Failed to get routes for metadata proxy edge "
+                      "%(edge)s: %(err)s",
                       {'edge': edge_id, 'err': e})
             return
 
@@ -381,8 +381,8 @@ class NsxVMetadataProxyHandler(object):
             lb_obj.submit_to_backend(self.nsxv_plugin.nsx_v.vcns, edge_id)
         except exceptions.RequestBad as e:
             # log the error and continue
-            LOG.error(_LE("Failed to update load balancer on metadata "
-                          "proxy edge %(edge)s: %(err)s"),
+            LOG.error("Failed to update load balancer on metadata "
+                      "proxy edge %(edge)s: %(err)s",
                       {'edge': edge_id, 'err': e})
 
         edge_ip = self._get_edge_internal_ip(context, rtr_id)
@@ -504,8 +504,8 @@ class NsxVMetadataProxyHandler(object):
             return edge_ip
 
         except Exception as e:
-            LOG.exception(_LE("Exception %s while creating internal edge "
-                              "for metadata service"), e)
+            LOG.exception("Exception %s while creating internal edge "
+                          "for metadata service", e)
 
             ports = self.nsxv_plugin.get_ports(
                 context, filters={'device_id': [rtr_id]})
@@ -721,13 +721,13 @@ class NsxVMetadataProxyHandler(object):
 
         if ports:
             if warn:
-                LOG.warning(_LW("cleanup_router_edge found port %(port)s for "
-                                "router %(router)s - deleting it now."),
+                LOG.warning("cleanup_router_edge found port %(port)s for "
+                            "router %(router)s - deleting it now.",
                             {'port': ports[0]['id'], 'router': rtr_id})
             try:
                 self.nsxv_plugin.delete_port(
                     ctx, ports[0]['id'],
                     l3_port_check=False)
             except Exception as e:
-                LOG.error(_LE("Failed to delete md_proxy port %(port)s: "
-                              "%(e)s"), {'port': ports[0]['id'], 'e': e})
+                LOG.error("Failed to delete md_proxy port %(port)s: "
+                          "%(e)s", {'port': ports[0]['id'], 'e': e})
@@ -85,7 +85,7 @@ from vmware_nsx.services.qos.common import utils as qos_com_utils
 from vmware_nsx.services.qos.nsx_v import utils as qos_utils
 
 import vmware_nsx
-from vmware_nsx._i18n import _, _LE, _LI, _LW
+from vmware_nsx._i18n import _
 from vmware_nsx.common import availability_zones as nsx_com_az
 from vmware_nsx.common import config  # noqa
 from vmware_nsx.common import exceptions as nsx_exc
@@ -341,9 +341,9 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
                     vnic_id = self._get_port_vnic_id(vnic_idx, device_id)
                     self._add_member_to_security_group(sg_id, vnic_id)
             except Exception as e:
-                LOG.info(_LI('Could not add port %(port)s to service '
+                LOG.info('Could not add port %(port)s to service '
                          'insertion security group. Exception '
-                         '%(err)s'),
+                         '%(err)s',
                          {'port': port['id'], 'err': e})
 
         # Doing this in a separate thread to not slow down the init process
@@ -355,7 +355,7 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
             # If called more than once - we should not create it again
             return self.conn.consume_in_threads()
 
-        LOG.info(_LI("NSXV plugin: starting RPC listeners"))
+        LOG.info("NSXV plugin: starting RPC listeners")
 
         self.endpoints = [agents_db.AgentExtRpcCallback()]
         self.topic = topics.PLUGIN
@@ -533,8 +533,8 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
                         section_uri,
                         self.nsx_sg_utils.to_xml_string(section), h)
                 except Exception as exc:
-                    LOG.error(_LE('Unable to update security group %(sg)s '
-                                  'section for logging. %(e)s'),
+                    LOG.error('Unable to update security group %(sg)s '
+                              'section for logging. %(e)s',
                               {'e': exc, 'sg': sg['id']})
 
         c_utils.spawn_n(process_security_groups_rules_logging)
@@ -557,8 +557,8 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
                 context, neutron_port_db['id'], network_id,
                 neutron_port_db['mac_address'])
         except Exception as e:
-            LOG.error(_LE('Unable to delete static bindings for %(id)s. '
-                          'Error: %(e)s'),
+            LOG.error('Unable to delete static bindings for %(id)s. '
+                      'Error: %(e)s',
                       {'id': neutron_port_db['id'], 'e': e})
 
     def _validate_network_qos(self, network, backend_network):
@@ -844,8 +844,8 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
                     self._vcm.update_port_group_spec_teaming,
                     switch)
             except Exception as e:
-                LOG.error(_LE('Unable to update teaming information for '
-                              'net %(net_id)s. Error: %(e)s'),
+                LOG.error('Unable to update teaming information for '
+                          'net %(net_id)s. Error: %(e)s',
                           {'net_id': net_id, 'e': e})
 
     def _create_vlan_network_at_backend(self, net_data, dvs_id):
@@ -889,13 +889,13 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
         try:
             self.nsx_v.vcns.add_member_to_security_group(
                 sg_id, vnic_id)
-            LOG.info(_LI("Added %(sg_id)s member to NSX security "
-                         "group %(vnic_id)s"),
+            LOG.info("Added %(sg_id)s member to NSX security "
+                     "group %(vnic_id)s",
                      {'sg_id': sg_id, 'vnic_id': vnic_id})
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE("NSX security group %(sg_id)s member add "
-                              "failed %(vnic_id)s."),
+                LOG.error("NSX security group %(sg_id)s member add "
+                          "failed %(vnic_id)s.",
                           {'sg_id': sg_id,
                            'vnic_id': vnic_id})
 
@@ -906,7 +906,7 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
         for add_sg in added_sgids:
             nsx_sg_id = nsx_db.get_nsx_security_group_id(session, add_sg)
             if nsx_sg_id is None:
-                LOG.warning(_LW("NSX security group not found for %s"), add_sg)
+                LOG.warning("NSX security group not found for %s", add_sg)
             else:
                 self._add_member_to_security_group(nsx_sg_id, vnic_id)
 
@@ -929,7 +929,7 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
         for del_sg in deleted_sgids:
             nsx_sg_id = nsx_db.get_nsx_security_group_id(session, del_sg)
             if nsx_sg_id is None:
-                LOG.warning(_LW("NSX security group not found for %s"), del_sg)
+                LOG.warning("NSX security group not found for %s", del_sg)
             else:
                 self._remove_member_from_security_group(nsx_sg_id, vnic_id)
 
@@ -1211,7 +1211,7 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
                 elif network_type != c_utils.NsxVNetworkTypes.PORTGROUP:
                     for dvsmrf, netmrf in six.iteritems(dvs_pg_mappings):
                         self._delete_backend_network(netmrf, dvsmrf)
-                LOG.exception(_LE('Failed to create network'))
+                LOG.exception('Failed to create network')
 
         # If init is incomplete calling _update_qos_network() will result a
         # deadlock.
@@ -1300,7 +1300,7 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
         try:
             policy = self.nsx_v.vcns.get_spoofguard_policy(policy_id)[1]
         except Exception:
-            LOG.error(_LE("Policy does not exists for %s"), policy_id)
+            LOG.error("Policy does not exists for %s", policy_id)
             # We will not attempt to delete a policy that does not exist
             return False
         if policy:
@@ -1333,14 +1333,14 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
                 is_dhcp_backend_deleted = True
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_LE('Failed to delete network'))
+                LOG.exception('Failed to delete network')
                 for port_id in auto_del:
                     try:
                         self.delete_port(context.elevated(), port_id,
                                          force_delete_dhcp=True)
                     except Exception as e:
-                        LOG.warning(_LW('Unable to delete port %(port_id)s. '
-                                        'Reason: %(e)s'),
+                        LOG.warning('Unable to delete port %(port_id)s. '
+                                    'Reason: %(e)s',
                                     {'port_id': port_id, 'e': e})
 
         with context.session.begin(subtransactions=True):
@@ -1491,10 +1491,10 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
                        orig_net[psec.PORTSECURITY] !=
                        net_attrs[psec.PORTSECURITY])
         if psec_update and not net_attrs[psec.PORTSECURITY]:
-            LOG.warning(_LW("Disabling port-security on network %s would "
+            LOG.warning("Disabling port-security on network %s would "
                         "require instance in the network to have VM tools "
                         "installed in order for security-groups to "
-                        "function properly."))
+                        "function properly.")
 
         # Check if the physical network of a vlan provider network was updated
         updated_morefs = False
@@ -1701,7 +1701,7 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
             self._create_dhcp_static_binding(context, neutron_db)
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_LE('Failed to create port'))
+                LOG.exception('Failed to create port')
                 # Revert what we have created and raise the exception
                 self.delete_port(context, port_data['id'])
 
@@ -1744,20 +1744,20 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
             vm_moref = self._vcm.get_vm_moref(device_id)
             if vm_moref is not None:
                 try:
-                    LOG.info(_LI("Add VM %(dev)s to exclude list on "
+                    LOG.info("Add VM %(dev)s to exclude list on "
                              "behalf of port %(port)s: added to "
-                             "list"),
+                             "list",
                              {"dev": device_id, "port": port_id})
                     self.nsx_v.vcns.add_vm_to_exclude_list(vm_moref)
                 except vsh_exc.RequestBad as e:
-                    LOG.error(_LE("Failed to add vm %(device)s "
+                    LOG.error("Failed to add vm %(device)s "
                               "moref %(moref)s to exclude list: "
-                              "%(err)s"),
+                              "%(err)s",
                               {'device': device_id, 'moref': vm_moref,
                                'err': e})
         else:
-            LOG.info(_LI("Add VM %(dev)s to exclude list on behalf of "
-                         "port %(port)s: already in list"),
+            LOG.info("Add VM %(dev)s to exclude list on behalf of "
+                     "port %(port)s: already in list",
                      {"dev": device_id, "port": port_id})
 
     def _remove_vm_from_exclude_list(self, context, device_id, port_id,
@@ -1772,20 +1772,20 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
             vm_moref = self._vcm.get_vm_moref(device_id)
             if vm_moref is not None:
                 try:
-                    LOG.info(_LI("Remove VM %(dev)s from exclude list on "
+                    LOG.info("Remove VM %(dev)s from exclude list on "
                              "behalf of port %(port)s: removed from "
-                             "list"),
+                             "list",
                              {"dev": device_id, "port": port_id})
                     self.nsx_v.vcns.delete_vm_from_exclude_list(vm_moref)
                 except vsh_exc.RequestBad as e:
-                    LOG.error(_LE("Failed to delete vm %(device)s "
+                    LOG.error("Failed to delete vm %(device)s "
                               "moref %(moref)s from exclude list: "
-                              "%(err)s"),
+                              "%(err)s",
                               {'device': device_id, 'moref': vm_moref,
                                'err': e})
         else:
-            LOG.info(_LI("Remove VM %(dev)s from exclude list on behalf "
-                         "of port %(port)s: other ports still in list"),
+            LOG.info("Remove VM %(dev)s from exclude list on behalf "
+                     "of port %(port)s: other ports still in list",
                      {"dev": device_id, "port": port_id})
 
     def update_port(self, context, id, port):
@@ -1961,8 +1961,8 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
                         ret_port['network_id'],
                         old_ip, new_ip, sub_mask)
                 else:
-                    LOG.info(_LI('Not updating fixed IP on backend for '
-                                 'device owner [%(dev_own)s] and port %(pid)s'),
+                    LOG.info('Not updating fixed IP on backend for '
+                             'device owner [%(dev_own)s] and port %(pid)s',
                              {'dev_own': owner, 'pid': original_port['id']})
 
         # update port security in DB if changed
@@ -1990,8 +1990,8 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
                             original_port['network_id'],
                             vnic_id)
                     except Exception as e:
-                        LOG.error(_LE('Could not delete the spoofguard '
-                                      'policy. Exception %s'), e)
+                        LOG.error('Could not delete the spoofguard '
+                                  'policy. Exception %s', e)
                     # remove vm from the exclusion list when it is detached
                     # from the device if it has no port security
                     if not original_port[psec.PORTSECURITY]:
@@ -2030,8 +2030,8 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
                             context.session, original_port['network_id'],
                             vnic_id)
                     except Exception as e:
-                        LOG.error(_LE('Could not delete the spoofguard '
-                                      'policy. Exception %s'), e)
+                        LOG.error('Could not delete the spoofguard '
+                                  'policy. Exception %s', e)
                     # Add vm to the exclusion list, since it has no port
                     # security now
                     self._add_vm_to_exclude_list(context, device_id, id)
@@ -2049,11 +2049,11 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
                     self._update_vnic_assigned_addresses(
                         context.session, ret_port, vnic_id)
                     if not has_port_security and has_security_groups:
-                        LOG.warning(_LW("port-security is disabled on "
+                        LOG.warning("port-security is disabled on "
                                     "port %(id)s, "
                                     "VM tools must be installed on instance "
                                     "%(device_id)s for security-groups to "
-                                    "function properly "),
+                                    "function properly ",
                                     {'id': id,
                                      'device_id': original_port['device_id']})
                     if (delete_security_groups
@@ -2131,8 +2131,8 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
                     context.session, neutron_db_port['network_id'],
                     vnic_id)
             except Exception as e:
-                LOG.error(_LE('Could not delete the spoofguard policy. '
-                              'Exception %s'), e)
+                LOG.error('Could not delete the spoofguard policy. '
+                          'Exception %s', e)
 
         if (cfg.CONF.nsxv.spoofguard_enabled and
                 not neutron_db_port[psec.PORTSECURITY] and
@@ -2270,15 +2270,15 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
                     s = self.create_subnet(context, item)
                     new_subnets.append(s)
                 except Exception as e:
-                    LOG.error(_LE('Unable to create bulk subnets. Failed to '
+                    LOG.error('Unable to create bulk subnets. Failed to '
                               'create item %(item)s. Rolling back. '
-                              'Error: %(e)s'), {'item': item, 'e': e})
+                              'Error: %(e)s', {'item': item, 'e': e})
                     for subnet in new_subnets:
                         s_id = subnet['id']
                         try:
                             self.delete_subnet(context, s_id)
                         except Exception:
-                            LOG.error(_LE('Unable to delete subnet %s'), s_id)
+                            LOG.error('Unable to delete subnet %s', s_id)
                     raise
         return new_subnets
 
@@ -2455,7 +2455,7 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
             if vdr_dhcp_binding:
                 pass
             else:
-                LOG.error(_LE('VDR DHCP binding not found for router %s'),
+                LOG.error('VDR DHCP binding not found for router %s',
                           vdr_id)
         sids = self.get_subnets(context,
                                 filters={'network_id': [network_id],
@@ -2629,7 +2629,7 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
 
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("Failed to update DHCP for subnet %s"),
+                LOG.exception("Failed to update DHCP for subnet %s",
                               subnet['id'])
 
     def setup_dhcp_edge_fw_rules(self, context, plugin, router_id):
@@ -2650,7 +2650,7 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
 
             except Exception as e:
                 LOG.error(
-                    _LE('Could not find ICMP Echo application. Exception %s'),
+                    'Could not find ICMP Echo application. Exception %s',
                     e)
         else:
             # For newer versions, we can use the raw icmp rule
@@ -2671,7 +2671,7 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
             # On failure, log that we couldn't configure the firewall on the
             # Edge appliance. This won't break the DHCP functionality
             LOG.error(
-                _LE('Could not set up DHCP Edge firewall. Exception %s'), e)
+                'Could not set up DHCP Edge firewall. Exception %s', e)
 
     def _create_network_dhcp_address_group(self, context, network_id):
         """Create dhcp address group for subnets attached to the network."""
@@ -2778,14 +2778,14 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
         try:
             metainfo = jsonutils.loads(metainfo_string)
             if not isinstance(metainfo, dict):
-                LOG.warning(_LW("Skipping router flavor %(flavor)s metainfo "
-                                "[%(metainfo)s]: expected a dictionary"),
+                LOG.warning("Skipping router flavor %(flavor)s metainfo "
+                            "[%(metainfo)s]: expected a dictionary",
                             {'flavor': flavor_id,
                              'metainfo': metainfo_string})
                 metainfo = {}
         except ValueError as e:
-            LOG.warning(_LW("Error reading router flavor %(flavor)s metainfo "
-                            "[%(metainfo)s]: %(error)s"),
+            LOG.warning("Error reading router flavor %(flavor)s metainfo "
+                        "[%(metainfo)s]: %(error)s",
                         {'flavor': flavor_id,
                          'metainfo': metainfo_string,
                          'error': e})
@@ -2836,8 +2836,8 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
             elif k in future_use_keys:
                 pass
             else:
-                LOG.warning(_LW("Skipping router flavor metainfo [%(k)s:%(v)s]"
-                                ":unsupported field"),
+                LOG.warning("Skipping router flavor metainfo [%(k)s:%(v)s]"
+                            ":unsupported field",
                             {'k': k, 'v': v})
 
     def _process_extra_attr_router_create(self, context, router_db, r):
@@ -3049,7 +3049,7 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
         if binding:
            router[ROUTER_SIZE] = binding.get("appliance_size")
        else:
-            LOG.error(_LE("No binding for router %s"), id)
+            LOG.error("No binding for router %s", id)
         return router
 
     def _get_external_attachment_info(self, context, router):
@@ -3157,16 +3157,16 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
                     is_routes_update=is_routes_update)
             except n_exc.IpAddressGenerationFailure:
                 del info['external_fixed_ips']
-                LOG.warning(_LW("Cannot get one subnet with gateway "
-                                "to allocate one available gw ip"))
+                LOG.warning("Cannot get one subnet with gateway "
+                            "to allocate one available gw ip")
                 router_driver._update_router_gw_info(
                     context, router_id, info,
                     is_routes_update=is_routes_update,
                     force_update=force_update)
         except vsh_exc.VcnsApiException:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE("Failed to update gw_info %(info)s on "
-                              "router %(router_id)s"),
+                LOG.error("Failed to update gw_info %(info)s on "
+                          "router %(router_id)s",
                           {'info': str(info),
                            'router_id': router_id})
                 router_driver._update_router_gw_info(
@@ -3338,8 +3338,8 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
                 context, router_id, interface_info)
         except vsh_exc.VcnsApiException:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE("Failed to add interface_info %(info)s on "
-                              "router %(router_id)s"),
+                LOG.error("Failed to add interface_info %(info)s on "
+                          "router %(router_id)s",
                           {'info': str(interface_info),
                            'router_id': router_id})
                 router_driver.remove_router_interface(
@@ -3389,7 +3389,7 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
             self._update_edge_router(context, router_id)
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("Failed to update edge router"))
+                LOG.exception("Failed to update edge router")
                 super(NsxVPluginV2, self).delete_floatingip(context,
                                                             fip_db['id'])
         self._set_floatingip_status(context, fip_db)
@@ -3411,7 +3411,7 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
             self._update_edge_router(context, router_id)
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("Failed to update edge router"))
+                LOG.exception("Failed to update edge router")
                 super(NsxVPluginV2, self).update_floatingip(
                     context, id, {'floatingip': {'port_id': old_port_id}})
         self._set_floatingip_status(context, fip_db)
@@ -3498,7 +3498,7 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
             edge_utils.update_firewall(self.nsx_v, context, router_id, fake_fw,
                                        allow_external=allow_external)
         except vsh_exc.ResourceNotFound:
-            LOG.error(_LE("Failed to update firewall for router %s"),
+            LOG.error("Failed to update firewall for router %s",
                       router_id)
 
     def _delete_nsx_security_group(self, nsx_sg_id, nsx_policy):
@@ -3513,8 +3513,8 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
                 self.nsx_sg_utils.del_nsx_security_group_from_policy(
                     nsx_policy, nsx_sg_id)
             except Exception as e:
-                LOG.warning(_LW("Failed to remove nsx security group "
-                                "%(id)s from policy %(pol)s : %(e)s"),
+                LOG.warning("Failed to remove nsx security group "
+                            "%(id)s from policy %(pol)s : %(e)s",
                             {'id': nsx_sg_id, 'pol': nsx_policy, 'e': e})
 
         self.nsx_v.vcns.delete_security_group(nsx_sg_id)
@@ -3699,7 +3699,7 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
                 if default_sg:
                     context = context.elevated()
                 super(NsxVPluginV2, self).delete_security_group(context, sg_id)
-                LOG.exception(_LE('Failed to create security group'))
+                LOG.exception('Failed to create security group')
 
         return new_sg
 
@@ -3818,7 +3818,7 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
 
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("Failed to delete security group"))
+                LOG.exception("Failed to delete security group")
 
     def _create_nsx_rule(self, context, rule,
                          nsx_sg_id=None, logged=False, action='allow'):
@@ -3962,7 +3962,7 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
                                    if p['neutron_id'] in ruleids]:
                     self.nsx_v.vcns.remove_rule_from_section(
                         section_uri, nsx_rule_id)
-                LOG.exception(_LE("Failed to create security group rule"))
+                LOG.exception("Failed to create security group rule")
         return new_rule_list
 
     def delete_security_group_rule(self, context, id):
@@ -4019,13 +4019,13 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
         try:
             self.nsx_v.vcns.edges_lock_operation()
         except Exception:
-            LOG.info(_LI("Unable to set manager lock operation"))
+            LOG.info("Unable to set manager lock operation")
 
     def _aggregate_publishing(self):
         try:
             self.nsx_v.vcns.configure_aggregate_publishing()
         except Exception:
-            LOG.info(_LI("Unable to configure aggregate publishing"))
+            LOG.info("Unable to configure aggregate publishing")
 
     def _configure_reservations(self):
         ver = self.nsx_v.vcns.get_version()
@@ -4036,7 +4036,7 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
         try:
             self.nsx_v.vcns.configure_reservations()
         except Exception:
-            LOG.info(_LI("Unable to configure edge reservations"))
+            LOG.info("Unable to configure edge reservations")
 
     def _validate_config(self):
         if (cfg.CONF.nsxv.dvs_id and
@@ -4071,7 +4071,7 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
 
         ver = self.nsx_v.vcns.get_version()
         if version.LooseVersion(ver) < version.LooseVersion('6.2.0'):
-            LOG.warning(_LW("Skipping validations. Not supported by version."))
+            LOG.warning("Skipping validations. Not supported by version.")
             return
 
         # Validate the host_groups for each AZ
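Many of the handlers touched here log from inside `excutils.save_and_reraise_exception()`, which re-raises the original exception when the block exits; dropping the translation marker does not change that control flow. A minimal sketch of the idiom (`flaky_backend_call` is an illustrative stand-in for the vcns calls in these hunks):

    from oslo_log import log as logging
    from oslo_utils import excutils

    LOG = logging.getLogger(__name__)

    def flaky_backend_call():
        raise RuntimeError("backend unavailable")  # stand-in failure

    try:
        flaky_backend_call()
    except RuntimeError:
        with excutils.save_and_reraise_exception():
            # LOG.exception records the message plus the active traceback;
            # the original exception is re-raised when the block exits.
            LOG.exception("Backend call failed")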
@@ -23,7 +23,7 @@ from oslo_serialization import jsonutils
 from oslo_utils import excutils
 from sqlalchemy.orm import exc as sa_exc
 
-from vmware_nsx._i18n import _, _LE, _LI, _LW
+from vmware_nsx._i18n import _
 from vmware_nsx.common import exceptions as nsxv_exc
 from vmware_nsx.common import nsxv_constants
 from vmware_nsx.common import utils
@@ -248,8 +248,8 @@ class EdgeApplianceDriver(object):
             status_level = self._edge_status_to_level(
                 response['edgeStatus'])
         except exceptions.VcnsApiException as e:
-            LOG.error(_LE("VCNS: Failed to get edge %(edge_id)s status: "
-                          "Reason: %(reason)s"),
+            LOG.error("VCNS: Failed to get edge %(edge_id)s status: "
+                      "Reason: %(reason)s",
                       {'edge_id': edge_id, 'reason': e.response})
             status_level = constants.RouterStatus.ROUTER_STATUS_ERROR
             try:
@@ -258,7 +258,7 @@ class EdgeApplianceDriver(object):
                         constants.VCNS_ERROR_CODE_EDGE_NOT_RUNNING):
                     status_level = constants.RouterStatus.ROUTER_STATUS_DOWN
             except ValueError:
-                LOG.error(_LE('Error code not present. %s'), e.response)
+                LOG.error('Error code not present. %s', e.response)
 
         return status_level
 
@@ -278,7 +278,7 @@ class EdgeApplianceDriver(object):
             return self.vcns.query_interface(edge_id, vnic_index)
         except exceptions.VcnsApiException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("NSXv: Failed to query vnic %s"), vnic_index)
+                LOG.exception("NSXv: Failed to query vnic %s", vnic_index)
 
     def update_interface(self, router_id, edge_id, index, network,
                          tunnel_index=-1, address=None, netmask=None,
@@ -338,8 +338,8 @@ class EdgeApplianceDriver(object):
                 edge_id, index, interface_req)
         except exceptions.VcnsApiException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("Failed to update vdr interface on edge: "
-                                  "%s"), edge_id)
+                LOG.exception("Failed to update vdr interface on edge: "
+                              "%s", edge_id)
 
     def delete_vdr_internal_interface(self, edge_id, interface_index):
         LOG.debug("Delete VDR interface on edge: %s", edge_id)
@@ -348,8 +348,8 @@ class EdgeApplianceDriver(object):
                 edge_id, interface_index)
         except exceptions.VcnsApiException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("Failed to delete vdr interface on edge: "
-                                  "%s"),
+                LOG.exception("Failed to delete vdr interface on edge: "
+                              "%s",
                               edge_id)
 
     def delete_interface(self, router_id, edge_id, index):
@@ -358,14 +358,14 @@ class EdgeApplianceDriver(object):
         try:
             self.vcns.delete_interface(edge_id, index)
         except exceptions.ResourceNotFound:
-            LOG.error(_LE('Failed to delete vnic %(vnic_index)s on edge '
-                          '%(edge_id)s: edge was not found'),
+            LOG.error('Failed to delete vnic %(vnic_index)s on edge '
+                      '%(edge_id)s: edge was not found',
                       {'vnic_index': index,
                        'edge_id': edge_id})
         except exceptions.VcnsApiException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("Failed to delete vnic %(vnic_index)s: "
-                                  "on edge %(edge_id)s"),
+                LOG.exception("Failed to delete vnic %(vnic_index)s: "
+                              "on edge %(edge_id)s",
                               {'vnic_index': index,
                                'edge_id': edge_id})
 
@@ -376,7 +376,7 @@ class EdgeApplianceDriver(object):
        try:
             return self.vcns.get_edges()[1]
         except exceptions.VcnsApiException as e:
-            LOG.exception(_LE("VCNS: Failed to get edges:\n%s"), e.response)
+            LOG.exception("VCNS: Failed to get edges:\n%s", e.response)
             raise e
 
     def deploy_edge(self, context, router_id, name, internal_network,
@@ -450,7 +450,7 @@ class EdgeApplianceDriver(object):
             self.callbacks.complete_edge_creation(
                 context, edge_id, name, router_id, dist, False)
             with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("NSXv: deploy edge failed."))
+                LOG.exception("NSXv: deploy edge failed.")
         return edge_id
 
     def update_edge(self, context, router_id, edge_id, name, internal_network,
@@ -494,7 +494,7 @@ class EdgeApplianceDriver(object):
             self.callbacks.complete_edge_update(
                 context, edge_id, router_id, True, set_errors)
         except exceptions.VcnsApiException as e:
-            LOG.error(_LE("Failed to update edge: %s"),
+            LOG.error("Failed to update edge: %s",
                       e.response)
             self.callbacks.complete_edge_update(
                 context, edge_id, router_id, False, set_errors)
@@ -518,7 +518,7 @@ class EdgeApplianceDriver(object):
             # update the edge
             self.vcns.update_edge(edge_id, edge)
         except exceptions.VcnsApiException as e:
-            LOG.error(_LE("Failed to rename edge: %s"),
+            LOG.error("Failed to rename edge: %s",
                       e.response)
 
     def resize_edge(self, edge_id, size):
@@ -542,7 +542,7 @@ class EdgeApplianceDriver(object):
             # update the edge
             self.vcns.update_edge(edge_id, edge)
         except exceptions.VcnsApiException as e:
-            LOG.error(_LE("Failed to resize edge: %s"), e.response)
+            LOG.error("Failed to resize edge: %s", e.response)
 
     def delete_edge(self, context, router_id, edge_id, dist=False):
         try:
@@ -550,7 +550,7 @@ class EdgeApplianceDriver(object):
             if not dist:
                 nsxv_db.clean_edge_vnic_binding(context.session, edge_id)
         except sa_exc.NoResultFound:
-            LOG.warning(_LW("Router Binding for %s not found"), router_id)
+            LOG.warning("Router Binding for %s not found", router_id)
 
         if edge_id:
             try:
@@ -559,12 +559,12 @@ class EdgeApplianceDriver(object):
             except exceptions.ResourceNotFound:
                 return True
             except exceptions.VcnsApiException as e:
-                LOG.exception(_LE("VCNS: Failed to delete %(edge_id)s:\n
|
LOG.exception("VCNS: Failed to delete %(edge_id)s:\n"
|
||||||
"%(response)s"),
|
"%(response)s",
|
||||||
{'edge_id': edge_id, 'response': e.response})
|
{'edge_id': edge_id, 'response': e.response})
|
||||||
return False
|
return False
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_LE("VCNS: Failed to delete %s"), edge_id)
|
LOG.exception("VCNS: Failed to delete %s", edge_id)
|
||||||
return False
|
return False
|
||||||
|
|
||||||
def _assemble_nat_rule(self, action, original_address,
|
def _assemble_nat_rule(self, action, original_address,
|
||||||
@ -591,7 +591,7 @@ class EdgeApplianceDriver(object):
|
|||||||
try:
|
try:
|
||||||
return self.vcns.get_nat_config(edge_id)[1]
|
return self.vcns.get_nat_config(edge_id)[1]
|
||||||
except exceptions.VcnsApiException as e:
|
except exceptions.VcnsApiException as e:
|
||||||
LOG.exception(_LE("VCNS: Failed to get nat config:\n%s"),
|
LOG.exception("VCNS: Failed to get nat config:\n%s",
|
||||||
e.response)
|
e.response)
|
||||||
raise e
|
raise e
|
||||||
|
|
||||||
@ -658,7 +658,7 @@ class EdgeApplianceDriver(object):
|
|||||||
self.vcns.update_nat_config(edge_id, nat)
|
self.vcns.update_nat_config(edge_id, nat)
|
||||||
return True
|
return True
|
||||||
except exceptions.VcnsApiException as e:
|
except exceptions.VcnsApiException as e:
|
||||||
LOG.exception(_LE("VCNS: Failed to create snat rule:\n%s"),
|
LOG.exception("VCNS: Failed to create snat rule:\n%s",
|
||||||
e.response)
|
e.response)
|
||||||
return False
|
return False
|
||||||
|
|
||||||
@ -696,7 +696,7 @@ class EdgeApplianceDriver(object):
|
|||||||
self.vcns.update_routes(edge_id, request)
|
self.vcns.update_routes(edge_id, request)
|
||||||
return True
|
return True
|
||||||
except exceptions.VcnsApiException as e:
|
except exceptions.VcnsApiException as e:
|
||||||
LOG.exception(_LE("VCNS: Failed to update routes:\n%s"),
|
LOG.exception("VCNS: Failed to update routes:\n%s",
|
||||||
e.response)
|
e.response)
|
||||||
return False
|
return False
|
||||||
|
|
||||||
@ -726,7 +726,7 @@ class EdgeApplianceDriver(object):
|
|||||||
edge_id)
|
edge_id)
|
||||||
except exceptions.VcnsApiException:
|
except exceptions.VcnsApiException:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.exception(_LE("Failed to get service config"))
|
LOG.exception("Failed to get service config")
|
||||||
return response
|
return response
|
||||||
|
|
||||||
def enable_service_loadbalancer(self, edge_id):
|
def enable_service_loadbalancer(self, edge_id):
|
||||||
@ -738,8 +738,8 @@ class EdgeApplianceDriver(object):
|
|||||||
self.vcns.enable_service_loadbalancer(edge_id, config)
|
self.vcns.enable_service_loadbalancer(edge_id, config)
|
||||||
except exceptions.VcnsApiException:
|
except exceptions.VcnsApiException:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.exception(_LE("Failed to enable loadbalancer "
|
LOG.exception("Failed to enable loadbalancer "
|
||||||
"service config"))
|
"service config")
|
||||||
|
|
||||||
def _delete_port_group(self, task):
|
def _delete_port_group(self, task):
|
||||||
try:
|
try:
|
||||||
@ -747,7 +747,7 @@ class EdgeApplianceDriver(object):
|
|||||||
task.userdata['dvs_id'],
|
task.userdata['dvs_id'],
|
||||||
task.userdata['port_group_id'])
|
task.userdata['port_group_id'])
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.error(_LE('Unable to delete %(pg)s exception %(ex)s'),
|
LOG.error('Unable to delete %(pg)s exception %(ex)s',
|
||||||
{'pg': task.userdata['port_group_id'],
|
{'pg': task.userdata['port_group_id'],
|
||||||
'ex': e})
|
'ex': e})
|
||||||
return task_constants.TaskStatus.ERROR
|
return task_constants.TaskStatus.ERROR
|
||||||
@ -770,14 +770,14 @@ class EdgeApplianceDriver(object):
|
|||||||
retry_number += 1
|
retry_number += 1
|
||||||
if retry_number > max_retries:
|
if retry_number > max_retries:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.exception(_LE("Failed to %s"), task.name)
|
LOG.exception("Failed to %s", task.name)
|
||||||
else:
|
else:
|
||||||
task.userdata['retry_number'] = retry_number
|
task.userdata['retry_number'] = retry_number
|
||||||
# Sleep twice as long as the previous retry
|
# Sleep twice as long as the previous retry
|
||||||
tts = (2 ** (retry_number - 1)) * delay
|
tts = (2 ** (retry_number - 1)) * delay
|
||||||
time.sleep(min(tts, 60))
|
time.sleep(min(tts, 60))
|
||||||
return task_constants.TaskStatus.PENDING
|
return task_constants.TaskStatus.PENDING
|
||||||
LOG.info(_LI("Task %(name)s completed."), {'name': task.name})
|
LOG.info("Task %(name)s completed.", {'name': task.name})
|
||||||
return task_constants.TaskStatus.COMPLETED
|
return task_constants.TaskStatus.COMPLETED
|
||||||
|
|
||||||
def delete_port_group(self, dvs_id, port_group_id):
|
def delete_port_group(self, dvs_id, port_group_id):
|
||||||
@ -807,14 +807,14 @@ class EdgeApplianceDriver(object):
|
|||||||
self.vcns.create_bridge(device_name, bridge)
|
self.vcns.create_bridge(device_name, bridge)
|
||||||
except exceptions.VcnsApiException:
|
except exceptions.VcnsApiException:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.exception(_LE("Failed to create bridge in the %s"),
|
LOG.exception("Failed to create bridge in the %s",
|
||||||
device_name)
|
device_name)
|
||||||
|
|
||||||
def delete_bridge(self, device_name):
|
def delete_bridge(self, device_name):
|
||||||
try:
|
try:
|
||||||
self.vcns.delete_bridge(device_name)
|
self.vcns.delete_bridge(device_name)
|
||||||
except exceptions.VcnsApiException:
|
except exceptions.VcnsApiException:
|
||||||
LOG.exception(_LE("Failed to delete bridge in the %s"),
|
LOG.exception("Failed to delete bridge in the %s",
|
||||||
device_name)
|
device_name)
|
||||||
|
|
||||||
def update_edge_ha(self, edge_id):
|
def update_edge_ha(self, edge_id):
|
||||||
@ -825,19 +825,19 @@ class EdgeApplianceDriver(object):
|
|||||||
|
|
||||||
def update_edge_syslog(self, edge_id, syslog_config, router_id):
|
def update_edge_syslog(self, edge_id, syslog_config, router_id):
|
||||||
if 'server_ip' not in syslog_config:
|
if 'server_ip' not in syslog_config:
|
||||||
LOG.warning(_LW("Server IP missing in syslog config for %s"),
|
LOG.warning("Server IP missing in syslog config for %s",
|
||||||
router_id)
|
router_id)
|
||||||
return
|
return
|
||||||
|
|
||||||
protocol = syslog_config.get('protocol', 'tcp')
|
protocol = syslog_config.get('protocol', 'tcp')
|
||||||
if protocol not in ['tcp', 'udp']:
|
if protocol not in ['tcp', 'udp']:
|
||||||
LOG.warning(_LW("Invalid protocol in syslog config for %s"),
|
LOG.warning("Invalid protocol in syslog config for %s",
|
||||||
router_id)
|
router_id)
|
||||||
return
|
return
|
||||||
|
|
||||||
loglevel = syslog_config.get('log_level')
|
loglevel = syslog_config.get('log_level')
|
||||||
if loglevel and loglevel not in edge_utils.SUPPORTED_EDGE_LOG_LEVELS:
|
if loglevel and loglevel not in edge_utils.SUPPORTED_EDGE_LOG_LEVELS:
|
||||||
LOG.warning(_LW("Invalid loglevel in syslog config for %s"),
|
LOG.warning("Invalid loglevel in syslog config for %s",
|
||||||
router_id)
|
router_id)
|
||||||
return
|
return
|
||||||
|
|
||||||
|
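
Every hunk in the edge_appliance_driver.py diff above is the same mechanical rewrite: the _LE/_LW/_LI wrapper is dropped from the LOG.* call while the format string and its arguments stay exactly as they were. A minimal before/after sketch of the pattern (hypothetical call site, not code taken verbatim from this commit):

    from oslo_log import log as logging

    LOG = logging.getLogger(__name__)

    def report_update_failure(edge_id, response):
        # Before: the message was wrapped in a log-translation marker:
        #     LOG.error(_LE("Failed to update edge %(edge_id)s: %(response)s"),
        #               {'edge_id': edge_id, 'response': response})
        # After: only the wrapper is removed; the parameters are still
        # handed to the logger, so %-interpolation stays deferred until
        # the record is actually emitted.
        LOG.error("Failed to update edge %(edge_id)s: %(response)s",
                  {'edge_id': edge_id, 'response': response})
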
@ -15,7 +15,7 @@
from oslo_log import log as logging
from oslo_utils import excutils

from vmware_nsx._i18n import _, _LE
from vmware_nsx._i18n import _
from vmware_nsx.common import exceptions as nsx_exc
from vmware_nsx.db import nsxv_db
from vmware_nsx.plugins.nsx_v.vshield.common import (
@ -224,8 +224,8 @@ class EdgeFirewallDriver(object):
try:
return self.vcns.get_firewall(edge_id)[1]
except vcns_exc.VcnsApiException as e:
LOG.exception(_LE("Failed to get firewall with edge "
LOG.exception("Failed to get firewall with edge "
"id: %s"), edge_id)
"id: %s", edge_id)
raise e

def _get_firewall_rule_next(self, context, edge_id, rule_vseid):
@ -252,8 +252,8 @@ class EdgeFirewallDriver(object):
response = self.vcns.get_firewall_rule(
edge_id, vcns_rule_id)[1]
except vcns_exc.VcnsApiException as e:
LOG.exception(_LE("Failed to get firewall rule: %(rule_id)s "
LOG.exception("Failed to get firewall rule: %(rule_id)s "
"with edge_id: %(edge_id)s"), {
"with edge_id: %(edge_id)s", {
'rule_id': id,
'edge_id': edge_id})
raise e
@ -267,8 +267,8 @@ class EdgeFirewallDriver(object):
try:
self.vcns.delete_firewall(edge_id)
except vcns_exc.VcnsApiException as e:
LOG.exception(_LE("Failed to delete firewall "
LOG.exception("Failed to delete firewall "
"with edge_id:%s"), edge_id)
"with edge_id:%s", edge_id)
raise e
nsxv_db.cleanup_nsxv_edge_firewallrule_binding(
context.session, edge_id)
@ -282,9 +282,9 @@ class EdgeFirewallDriver(object):
self.vcns.update_firewall_rule(edge_id, vcns_rule_id, fwr_req)
except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Failed to update firewall rule: "
LOG.exception("Failed to update firewall rule: "
"%(rule_id)s "
"with edge_id: %(edge_id)s"),
"with edge_id: %(edge_id)s",
{'rule_id': id,
'edge_id': edge_id})

@ -296,9 +296,9 @@ class EdgeFirewallDriver(object):
self.vcns.delete_firewall_rule(edge_id, vcns_rule_id)
except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Failed to delete firewall rule: "
LOG.exception("Failed to delete firewall rule: "
"%(rule_id)s "
"with edge_id: %(edge_id)s"),
"with edge_id: %(edge_id)s",
{'rule_id': id,
'edge_id': edge_id})
nsxv_db.delete_nsxv_edge_firewallrule_binding(
@ -314,8 +314,8 @@ class EdgeFirewallDriver(object):
edge_id, ref_vcns_rule_id, fwr_req)[0]
except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Failed to add firewall rule above: "
LOG.exception("Failed to add firewall rule above: "
"%(rule_id)s with edge_id: %(edge_id)s"),
"%(rule_id)s with edge_id: %(edge_id)s",
{'rule_id': ref_vcns_rule_id,
'edge_id': edge_id})

@ -342,8 +342,8 @@ class EdgeFirewallDriver(object):
edge_id, int(ref_vcns_rule_id), fwr_req)[0]
except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Failed to add firewall rule above: "
LOG.exception("Failed to add firewall rule above: "
"%(rule_id)s with edge_id: %(edge_id)s"),
"%(rule_id)s with edge_id: %(edge_id)s",
{'rule_id': ref_vcns_rule_id,
'edge_id': edge_id})
else:
@ -353,8 +353,8 @@ class EdgeFirewallDriver(object):
edge_id, fwr_req)[0]
except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Failed to append a firewall rule"
LOG.exception("Failed to append a firewall rule"
"with edge_id: %s"), edge_id)
"with edge_id: %s", edge_id)

objuri = header['location']
fwr_vseid = objuri[objuri.rfind("/") + 1:]
@ -386,8 +386,8 @@ class EdgeFirewallDriver(object):
self.vcns.update_firewall(edge_id, config)
except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Failed to update firewall "
LOG.exception("Failed to update firewall "
"with edge_id: %s"), edge_id)
"with edge_id: %s", edge_id)
vcns_fw_config = self._get_firewall(edge_id)

nsxv_db.cleanup_nsxv_edge_firewallrule_binding(
@ -15,7 +15,7 @@
from oslo_log import log as logging
from oslo_utils import excutils

from vmware_nsx._i18n import _, _LE, _LW
from vmware_nsx._i18n import _
from vmware_nsx.plugins.nsx_v.vshield.common import (
exceptions as vcns_exc)

@ -63,9 +63,9 @@ class EdgeIPsecVpnDriver(object):
ikepolicy['encryption_algorithm'] != ipsecpolicy[
'encryption_algorithm'] or
ikepolicy['pfs'] != ipsecpolicy['pfs']):
LOG.warning(_LW(
LOG.warning(
"IKEPolicy and IPsecPolicy should have consistent "
"auth_algorithm, encryption_algorithm and pfs for VSE!"))
"auth_algorithm, encryption_algorithm and pfs for VSE!")

# Check whether encryption_algorithm is allowed.
encryption_algorithm = ENCRYPTION_ALGORITHM_MAP.get(
@ -135,19 +135,19 @@ class EdgeIPsecVpnDriver(object):
self.vcns.update_ipsec_config(edge_id, ipsec_config)
except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Failed to update ipsec vpn "
LOG.exception("Failed to update ipsec vpn "
"configuration with edge_id: %s"),
"configuration with edge_id: %s",
edge_id)

def delete_ipsec_config(self, edge_id):
try:
self.vcns.delete_ipsec_config(edge_id)
except vcns_exc.ResourceNotFound:
LOG.warning(_LW("IPsec config not found on edge: %s"), edge_id)
LOG.warning("IPsec config not found on edge: %s", edge_id)
except vcns_exc.VcnsApiException:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Failed to delete ipsec vpn configuration "
LOG.exception("Failed to delete ipsec vpn configuration "
"with edge_id: %s"), edge_id)
"with edge_id: %s", edge_id)

def get_ipsec_config(self, edge_id):
return self.vcns.get_ipsec_config(edge_id)
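
The import hunks in these driver files mark the boundary the commit draws: _() remains imported because user-facing exception messages are still translated, while the log-level markers disappear entirely. An illustrative sketch of the resulting convention (validate_syslog_protocol is a made-up helper, not code from this commit):

    from oslo_log import log as logging

    from vmware_nsx._i18n import _

    LOG = logging.getLogger(__name__)

    def validate_syslog_protocol(protocol, router_id):
        if protocol not in ('tcp', 'udp'):
            # Operator-facing log line: plain string, no marker.
            LOG.warning("Invalid protocol in syslog config for %s",
                        router_id)
            # API-facing error text: still wrapped in _() for translation.
            raise ValueError(_("Unsupported syslog protocol: %s") % protocol)
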
@ -38,7 +38,7 @@ from neutron.plugins.common import constants as plugin_const
|
|||||||
from neutron_lib.api import validators
|
from neutron_lib.api import validators
|
||||||
from neutron_lib import exceptions as n_exc
|
from neutron_lib import exceptions as n_exc
|
||||||
|
|
||||||
from vmware_nsx._i18n import _, _LE, _LI, _LW
|
from vmware_nsx._i18n import _
|
||||||
from vmware_nsx.common import config as conf
|
from vmware_nsx.common import config as conf
|
||||||
from vmware_nsx.common import exceptions as nsx_exc
|
from vmware_nsx.common import exceptions as nsx_exc
|
||||||
from vmware_nsx.common import locking
|
from vmware_nsx.common import locking
|
||||||
@ -218,9 +218,9 @@ class EdgeManager(object):
|
|||||||
if version.LooseVersion(ver) >= version.LooseVersion('6.2.3'):
|
if version.LooseVersion(ver) >= version.LooseVersion('6.2.3'):
|
||||||
self.is_dhcp_opt_enabled = True
|
self.is_dhcp_opt_enabled = True
|
||||||
elif cfg.CONF.nsxv.dhcp_force_metadata:
|
elif cfg.CONF.nsxv.dhcp_force_metadata:
|
||||||
LOG.warning(_LW("Skipping dhcp_force_metadata param since dhcp "
|
LOG.warning("Skipping dhcp_force_metadata param since dhcp "
|
||||||
"option feature can only be supported at version "
|
"option feature can only be supported at version "
|
||||||
"6.2.3 or higher"))
|
"6.2.3 or higher")
|
||||||
self.is_dhcp_opt_enabled = False
|
self.is_dhcp_opt_enabled = False
|
||||||
|
|
||||||
def _get_per_edge_rp_filter_state(self):
|
def _get_per_edge_rp_filter_state(self):
|
||||||
@ -235,9 +235,9 @@ class EdgeManager(object):
|
|||||||
context.session, edge_id):
|
context.session, edge_id):
|
||||||
if binding['status'] == plugin_const.ERROR:
|
if binding['status'] == plugin_const.ERROR:
|
||||||
continue
|
continue
|
||||||
LOG.error(_LE('Mark router binding ERROR for resource '
|
LOG.error('Mark router binding ERROR for resource '
|
||||||
'%(res_id)s on edge %(edge_id)s due to '
|
'%(res_id)s on edge %(edge_id)s due to '
|
||||||
'%(reason)s'),
|
'%(reason)s',
|
||||||
{'res_id': binding['router_id'],
|
{'res_id': binding['router_id'],
|
||||||
'edge_id': edge_id,
|
'edge_id': edge_id,
|
||||||
'reason': error_reason})
|
'reason': error_reason})
|
||||||
@ -297,8 +297,8 @@ class EdgeManager(object):
|
|||||||
|
|
||||||
def _delete_edge(self, context, router_binding):
|
def _delete_edge(self, context, router_binding):
|
||||||
if router_binding['status'] == plugin_const.ERROR:
|
if router_binding['status'] == plugin_const.ERROR:
|
||||||
LOG.warning(_LW("Start deleting %(router_id)s corresponding"
|
LOG.warning("Start deleting %(router_id)s corresponding "
|
||||||
"edge: %(edge_id)s due to status error"),
|
"edge: %(edge_id)s due to status error",
|
||||||
{'router_id': router_binding['router_id'],
|
{'router_id': router_binding['router_id'],
|
||||||
'edge_id': router_binding['edge_id']})
|
'edge_id': router_binding['edge_id']})
|
||||||
nsxv_db.update_nsxv_router_binding(
|
nsxv_db.update_nsxv_router_binding(
|
||||||
@ -539,9 +539,9 @@ class EdgeManager(object):
|
|||||||
else:
|
else:
|
||||||
self.nsxv_manager.vcns.update_interface(edge_id, vnic_config)
|
self.nsxv_manager.vcns.update_interface(edge_id, vnic_config)
|
||||||
except nsxapi_exc.VcnsApiException:
|
except nsxapi_exc.VcnsApiException:
|
||||||
LOG.exception(_LE('Failed to delete vnic %(vnic_index)d '
|
LOG.exception('Failed to delete vnic %(vnic_index)d '
|
||||||
'tunnel %(tunnel_index)d on edge %(edge_id)s '
|
'tunnel %(tunnel_index)d on edge %(edge_id)s '
|
||||||
'for network %(net_id)s'),
|
'for network %(net_id)s',
|
||||||
{'vnic_index': vnic_index,
|
{'vnic_index': vnic_index,
|
||||||
'tunnel_index': tunnel_index,
|
'tunnel_index': tunnel_index,
|
||||||
'net_id': network_id,
|
'net_id': network_id,
|
||||||
@ -610,8 +610,8 @@ class EdgeManager(object):
|
|||||||
net_ids = nsx_db.get_net_ids(context.session, ls_id)
|
net_ids = nsx_db.get_net_ids(context.session, ls_id)
|
||||||
if net_ids:
|
if net_ids:
|
||||||
# Here should never happen, else one bug occurs
|
# Here should never happen, else one bug occurs
|
||||||
LOG.error(_LE("net %(id)s on edge %(edge_id)s "
|
LOG.error("net %(id)s on edge %(edge_id)s "
|
||||||
"overlaps with new net %(net_id)s"),
|
"overlaps with new net %(net_id)s",
|
||||||
{'id': net_ids[0],
|
{'id': net_ids[0],
|
||||||
'edge_id': edge_id,
|
'edge_id': edge_id,
|
||||||
'net_id': network_id})
|
'net_id': network_id})
|
||||||
@ -621,10 +621,10 @@ class EdgeManager(object):
|
|||||||
else:
|
else:
|
||||||
# Occurs when there are DB inconsistency
|
# Occurs when there are DB inconsistency
|
||||||
sb["is_overlapped"] = True
|
sb["is_overlapped"] = True
|
||||||
LOG.error(_LE("unexpected sub intf %(id)s on edge "
|
LOG.error("unexpected sub intf %(id)s on edge "
|
||||||
"%(edge_id)s overlaps with new net "
|
"%(edge_id)s overlaps with new net "
|
||||||
"%(net_id)s. we would update with "
|
"%(net_id)s. we would update with "
|
||||||
"deleting it for DB consistency"),
|
"deleting it for DB consistency",
|
||||||
{'id': ls_id,
|
{'id': ls_id,
|
||||||
'edge_id': edge_id,
|
'edge_id': edge_id,
|
||||||
'net_id': network_id})
|
'net_id': network_id})
|
||||||
@ -725,7 +725,7 @@ class EdgeManager(object):
|
|||||||
try:
|
try:
|
||||||
self.nsxv_manager.rename_edge(edge_id, name)
|
self.nsxv_manager.rename_edge(edge_id, name)
|
||||||
except nsxapi_exc.VcnsApiException as e:
|
except nsxapi_exc.VcnsApiException as e:
|
||||||
LOG.error(_LE("Failed to update edge: %s"),
|
LOG.error("Failed to update edge: %s",
|
||||||
e.response)
|
e.response)
|
||||||
self.nsxv_manager.callbacks.complete_edge_update(
|
self.nsxv_manager.callbacks.complete_edge_update(
|
||||||
context, edge_id, resource_id, False, set_errors=True)
|
context, edge_id, resource_id, False, set_errors=True)
|
||||||
@ -748,8 +748,8 @@ class EdgeManager(object):
|
|||||||
"""Try to collect one edge to pool."""
|
"""Try to collect one edge to pool."""
|
||||||
binding = nsxv_db.get_nsxv_router_binding(context.session, router_id)
|
binding = nsxv_db.get_nsxv_router_binding(context.session, router_id)
|
||||||
if not binding:
|
if not binding:
|
||||||
LOG.warning(_LW("router binding for router: %s "
|
LOG.warning("router binding for router: %s "
|
||||||
"not found"), router_id)
|
"not found", router_id)
|
||||||
return
|
return
|
||||||
dist = (binding['edge_type'] == nsxv_constants.VDR_EDGE)
|
dist = (binding['edge_type'] == nsxv_constants.VDR_EDGE)
|
||||||
edge_id = binding['edge_id']
|
edge_id = binding['edge_id']
|
||||||
@ -893,8 +893,8 @@ class EdgeManager(object):
|
|||||||
def rename_lrouter(self, context, router_id, new_name):
|
def rename_lrouter(self, context, router_id, new_name):
|
||||||
binding = nsxv_db.get_nsxv_router_binding(context.session, router_id)
|
binding = nsxv_db.get_nsxv_router_binding(context.session, router_id)
|
||||||
if not binding or not binding['edge_id']:
|
if not binding or not binding['edge_id']:
|
||||||
LOG.warning(_LW("router binding for router: %s "
|
LOG.warning("router binding for router: %s "
|
||||||
"not found"), router_id)
|
"not found", router_id)
|
||||||
return
|
return
|
||||||
edge_id = binding['edge_id']
|
edge_id = binding['edge_id']
|
||||||
with locking.LockManager.get_lock(str(edge_id)):
|
with locking.LockManager.get_lock(str(edge_id)):
|
||||||
@ -905,8 +905,8 @@ class EdgeManager(object):
|
|||||||
# get the router edge-id
|
# get the router edge-id
|
||||||
binding = nsxv_db.get_nsxv_router_binding(context.session, router_id)
|
binding = nsxv_db.get_nsxv_router_binding(context.session, router_id)
|
||||||
if not binding or not binding['edge_id']:
|
if not binding or not binding['edge_id']:
|
||||||
LOG.warning(_LW("router binding for router: %s "
|
LOG.warning("router binding for router: %s "
|
||||||
"not found"), router_id)
|
"not found", router_id)
|
||||||
return
|
return
|
||||||
edge_id = binding['edge_id']
|
edge_id = binding['edge_id']
|
||||||
with locking.LockManager.get_lock(str(edge_id)):
|
with locking.LockManager.get_lock(str(edge_id)):
|
||||||
@ -1041,8 +1041,8 @@ class EdgeManager(object):
|
|||||||
'169.254.169.254/32',
|
'169.254.169.254/32',
|
||||||
dhcp_ip)
|
dhcp_ip)
|
||||||
else:
|
else:
|
||||||
LOG.error(_LE("Failed to find the dhcp port on subnet "
|
LOG.error("Failed to find the dhcp port on subnet "
|
||||||
"%s to do metadata host route insertion"),
|
"%s to do metadata host route insertion",
|
||||||
subnet_id)
|
subnet_id)
|
||||||
|
|
||||||
def update_dhcp_service_config(self, context, edge_id):
|
def update_dhcp_service_config(self, context, edge_id):
|
||||||
@ -1096,8 +1096,8 @@ class EdgeManager(object):
|
|||||||
if not self.check_edge_active_at_backend(new_id):
|
if not self.check_edge_active_at_backend(new_id):
|
||||||
# Remove edge_id from available edges pool.
|
# Remove edge_id from available edges pool.
|
||||||
available_edge_ids.remove(new_id)
|
available_edge_ids.remove(new_id)
|
||||||
LOG.warning(_LW("Skipping edge: %s due to inactive status on "
|
LOG.warning("Skipping edge: %s due to inactive status on "
|
||||||
"the backend."), new_id)
|
"the backend.", new_id)
|
||||||
else:
|
else:
|
||||||
return new_id
|
return new_id
|
||||||
|
|
||||||
@ -1159,8 +1159,8 @@ class EdgeManager(object):
|
|||||||
old_binding = nsxv_db.get_edge_vnic_binding(
|
old_binding = nsxv_db.get_edge_vnic_binding(
|
||||||
context.session, edge_id, network_id)
|
context.session, edge_id, network_id)
|
||||||
if not old_binding:
|
if not old_binding:
|
||||||
LOG.error(_LE("Remove network %(id)s failed since no binding "
|
LOG.error("Remove network %(id)s failed since no binding "
|
||||||
"found on edge %(edge_id)s"),
|
"found on edge %(edge_id)s",
|
||||||
{'id': network_id,
|
{'id': network_id,
|
||||||
'edge_id': edge_id})
|
'edge_id': edge_id})
|
||||||
self._delete_dhcp_router_binding(context, network_id, edge_id)
|
self._delete_dhcp_router_binding(context, network_id, edge_id)
|
||||||
@ -1176,8 +1176,8 @@ class EdgeManager(object):
|
|||||||
self.update_dhcp_service_config(context, edge_id)
|
self.update_dhcp_service_config(context, edge_id)
|
||||||
|
|
||||||
except nsxapi_exc.VcnsApiException:
|
except nsxapi_exc.VcnsApiException:
|
||||||
LOG.exception(_LE('Failed to delete vnic %(vnic_index)d '
|
LOG.exception('Failed to delete vnic %(vnic_index)d '
|
||||||
'tunnel %(tunnel_index)d on edge %(edge_id)s'),
|
'tunnel %(tunnel_index)d on edge %(edge_id)s',
|
||||||
{'vnic_index': old_vnic_index,
|
{'vnic_index': old_vnic_index,
|
||||||
'tunnel_index': old_tunnel_index,
|
'tunnel_index': old_tunnel_index,
|
||||||
'edge_id': edge_id})
|
'edge_id': edge_id})
|
||||||
@ -1185,8 +1185,8 @@ class EdgeManager(object):
|
|||||||
context, edge_id,
|
context, edge_id,
|
||||||
error_reason="remove network from dhcp edge failure")
|
error_reason="remove network from dhcp edge failure")
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_LE('Failed to delete vnic %(vnic_index)d '
|
LOG.exception('Failed to delete vnic %(vnic_index)d '
|
||||||
'tunnel %(tunnel_index)d on edge %(edge_id)s'),
|
'tunnel %(tunnel_index)d on edge %(edge_id)s',
|
||||||
{'vnic_index': old_vnic_index,
|
{'vnic_index': old_vnic_index,
|
||||||
'tunnel_index': old_tunnel_index,
|
'tunnel_index': old_tunnel_index,
|
||||||
'edge_id': edge_id})
|
'edge_id': edge_id})
|
||||||
@ -1339,7 +1339,7 @@ class EdgeManager(object):
|
|||||||
edge_binding = nsxv_db.get_nsxv_router_binding(context.session,
|
edge_binding = nsxv_db.get_nsxv_router_binding(context.session,
|
||||||
resource_id)
|
resource_id)
|
||||||
if not edge_binding:
|
if not edge_binding:
|
||||||
LOG.warning(_LW('Edge binding does not exist for network %s'),
|
LOG.warning('Edge binding does not exist for network %s',
|
||||||
network_id)
|
network_id)
|
||||||
return
|
return
|
||||||
dhcp_binding = nsxv_db.get_edge_vnic_binding(context.session,
|
dhcp_binding = nsxv_db.get_edge_vnic_binding(context.session,
|
||||||
@ -1368,9 +1368,9 @@ class EdgeManager(object):
|
|||||||
except nsxapi_exc.VcnsApiException:
|
except nsxapi_exc.VcnsApiException:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.exception(
|
LOG.exception(
|
||||||
_LE('Failed to update the dhcp service for '
|
'Failed to update the dhcp service for '
|
||||||
'%(edge_id)s on vnic %(vnic_index)d '
|
'%(edge_id)s on vnic %(vnic_index)d '
|
||||||
'tunnel %(tunnel_index)d'),
|
'tunnel %(tunnel_index)d',
|
||||||
{'edge_id': edge_id,
|
{'edge_id': edge_id,
|
||||||
'vnic_index': vnic_index,
|
'vnic_index': vnic_index,
|
||||||
'tunnel_index': tunnel_index})
|
'tunnel_index': tunnel_index})
|
||||||
@ -1380,9 +1380,9 @@ class EdgeManager(object):
|
|||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.exception(
|
LOG.exception(
|
||||||
_LE('Failed to update the dhcp service for '
|
'Failed to update the dhcp service for '
|
||||||
'%(edge_id)s on vnic %(vnic_index)d '
|
'%(edge_id)s on vnic %(vnic_index)d '
|
||||||
'tunnel %(tunnel_index)d'),
|
'tunnel %(tunnel_index)d',
|
||||||
{'edge_id': edge_id,
|
{'edge_id': edge_id,
|
||||||
'vnic_index': vnic_index,
|
'vnic_index': vnic_index,
|
||||||
'tunnel_index': tunnel_index})
|
'tunnel_index': tunnel_index})
|
||||||
@ -1413,10 +1413,10 @@ class EdgeManager(object):
|
|||||||
network_id)
|
network_id)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.exception(_LE('Failed to delete the tunnel '
|
LOG.exception('Failed to delete the tunnel '
|
||||||
'%(tunnel_index)d on vnic '
|
'%(tunnel_index)d on vnic '
|
||||||
'%(vnic_index)d'
|
'%(vnic_index)d'
|
||||||
'from DHCP Edge %(edge_id)s'),
|
'from DHCP Edge %(edge_id)s',
|
||||||
{'tunnel_index': tunnel_index,
|
{'tunnel_index': tunnel_index,
|
||||||
'vnic_index': vnic_index,
|
'vnic_index': vnic_index,
|
||||||
'edge_id': edge_id})
|
'edge_id': edge_id})
|
||||||
@ -1470,13 +1470,13 @@ class EdgeManager(object):
|
|||||||
except db_exc.DBDuplicateEntry as e:
|
except db_exc.DBDuplicateEntry as e:
|
||||||
# Could have garbage binding in the DB - warn and overwrite
|
# Could have garbage binding in the DB - warn and overwrite
|
||||||
if 'PRIMARY' in e.columns:
|
if 'PRIMARY' in e.columns:
|
||||||
LOG.warning(_LW('Conflict found in VDR DHCP bindings - '
|
LOG.warning('Conflict found in VDR DHCP bindings - '
|
||||||
'router %s was already bound'),
|
'router %s was already bound',
|
||||||
vdr_router_id)
|
vdr_router_id)
|
||||||
del_vdr = vdr_router_id
|
del_vdr = vdr_router_id
|
||||||
else:
|
else:
|
||||||
LOG.warning(_LW('Conflict found in VDR DHCP bindings - '
|
LOG.warning('Conflict found in VDR DHCP bindings - '
|
||||||
'DHCP edge %s was already bound'),
|
'DHCP edge %s was already bound',
|
||||||
dhcp_edge_id)
|
dhcp_edge_id)
|
||||||
bind = nsxv_db.get_vdr_dhcp_binding_by_edge(
|
bind = nsxv_db.get_vdr_dhcp_binding_by_edge(
|
||||||
context.session, dhcp_edge_id)
|
context.session, dhcp_edge_id)
|
||||||
@ -1491,8 +1491,8 @@ class EdgeManager(object):
|
|||||||
nsxv_db.add_vdr_dhcp_binding(context.session,
|
nsxv_db.add_vdr_dhcp_binding(context.session,
|
||||||
vdr_router_id, dhcp_edge_id)
|
vdr_router_id, dhcp_edge_id)
|
||||||
else:
|
else:
|
||||||
LOG.error(_LE('Database conflict could not be recovered '
|
LOG.error('Database conflict could not be recovered '
|
||||||
'for VDR %(vdr)s DHCP edge %(dhcp)s'),
|
'for VDR %(vdr)s DHCP edge %(dhcp)s',
|
||||||
{'vdr': vdr_router_id, 'dhcp': dhcp_edge_id})
|
{'vdr': vdr_router_id, 'dhcp': dhcp_edge_id})
|
||||||
|
|
||||||
address_groups = self.plugin._create_network_dhcp_address_group(
|
address_groups = self.plugin._create_network_dhcp_address_group(
|
||||||
@ -1735,14 +1735,14 @@ class EdgeManager(object):
|
|||||||
context.session, plr_id)
|
context.session, plr_id)
|
||||||
|
|
||||||
if router_binding is None:
|
if router_binding is None:
|
||||||
LOG.error(_LE("Router binding not found for router: %s"),
|
LOG.error("Router binding not found for router: %s",
|
||||||
router_id)
|
router_id)
|
||||||
else:
|
else:
|
||||||
plr_edge_id = router_binding.edge_id
|
plr_edge_id = router_binding.edge_id
|
||||||
vnic_binding = nsxv_db.get_edge_vnic_binding(
|
vnic_binding = nsxv_db.get_edge_vnic_binding(
|
||||||
context.session, plr_edge_id, lswitch_id)
|
context.session, plr_edge_id, lswitch_id)
|
||||||
if vnic_binding is None:
|
if vnic_binding is None:
|
||||||
LOG.error(_LE("Vnic binding not found for router: %s"),
|
LOG.error("Vnic binding not found for router: %s",
|
||||||
router_id)
|
router_id)
|
||||||
else:
|
else:
|
||||||
# Clear static routes before delete internal vnic
|
# Clear static routes before delete internal vnic
|
||||||
@ -1764,7 +1764,7 @@ class EdgeManager(object):
|
|||||||
tlr_vnic_binding = nsxv_db.get_edge_vnic_binding(
|
tlr_vnic_binding = nsxv_db.get_edge_vnic_binding(
|
||||||
context.session, tlr_edge_id, lswitch_id)
|
context.session, tlr_edge_id, lswitch_id)
|
||||||
if tlr_vnic_binding is None:
|
if tlr_vnic_binding is None:
|
||||||
LOG.error(_LE("Vnic binding not found for router: %s"), router_id)
|
LOG.error("Vnic binding not found for router: %s", router_id)
|
||||||
else:
|
else:
|
||||||
self.nsxv_manager.delete_vdr_internal_interface(
|
self.nsxv_manager.delete_vdr_internal_interface(
|
||||||
tlr_edge_id, tlr_vnic_binding.vnic_index)
|
tlr_edge_id, tlr_vnic_binding.vnic_index)
|
||||||
@ -1775,7 +1775,7 @@ class EdgeManager(object):
|
|||||||
# Then delete the internal lswitch
|
# Then delete the internal lswitch
|
||||||
self.nsxv_manager.delete_virtual_wire(lswitch_id)
|
self.nsxv_manager.delete_virtual_wire(lswitch_id)
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.warning(_LW("Failed to delete virtual wire: %s"), lswitch_id)
|
LOG.warning("Failed to delete virtual wire: %s", lswitch_id)
|
||||||
|
|
||||||
def get_routers_on_edge(self, context, edge_id):
|
def get_routers_on_edge(self, context, edge_id):
|
||||||
router_ids = []
|
router_ids = []
|
||||||
@ -1793,8 +1793,8 @@ class EdgeManager(object):
|
|||||||
valid_router_ids = [ele['id'] for ele in valid_router_ids]
|
valid_router_ids = [ele['id'] for ele in valid_router_ids]
|
||||||
|
|
||||||
if set(valid_router_ids) != set(router_ids):
|
if set(valid_router_ids) != set(router_ids):
|
||||||
LOG.error(_LE("Get invalid router bindings with "
|
LOG.error("Get invalid router bindings with "
|
||||||
"router ids: %s"),
|
"router ids: %s",
|
||||||
str(set(router_ids) - set(valid_router_ids)))
|
str(set(router_ids) - set(valid_router_ids)))
|
||||||
return valid_router_ids
|
return valid_router_ids
|
||||||
|
|
||||||
@ -1849,7 +1849,7 @@ class EdgeManager(object):
|
|||||||
else:
|
else:
|
||||||
# TODO(yangyu): Remove conflict_network_ids
|
# TODO(yangyu): Remove conflict_network_ids
|
||||||
LOG.warning(
|
LOG.warning(
|
||||||
_LW("Failed to query conflict_router_ids"))
|
"Failed to query conflict_router_ids")
|
||||||
if available_edge_id:
|
if available_edge_id:
|
||||||
edge_binding = nsxv_db.get_nsxv_router_bindings_by_edge(
|
edge_binding = nsxv_db.get_nsxv_router_bindings_by_edge(
|
||||||
context.session, available_edge_id)[0]
|
context.session, available_edge_id)[0]
|
||||||
@ -1923,23 +1923,23 @@ class EdgeManager(object):
|
|||||||
self.nsxv_manager.vcns.delete_dhcp_binding(
|
self.nsxv_manager.vcns.delete_dhcp_binding(
|
||||||
edge_id, dhcp_binding.binding_id)
|
edge_id, dhcp_binding.binding_id)
|
||||||
else:
|
else:
|
||||||
LOG.warning(_LW("Failed to find binding on edge "
|
LOG.warning("Failed to find binding on edge "
|
||||||
"%(edge_id)s for port "
|
"%(edge_id)s for port "
|
||||||
"%(port_id)s with %(binding_id)s"),
|
"%(port_id)s with %(binding_id)s",
|
||||||
{'edge_id': edge_id,
|
{'edge_id': edge_id,
|
||||||
'port_id': port_id,
|
'port_id': port_id,
|
||||||
'binding_id': dhcp_binding.binding_id})
|
'binding_id': dhcp_binding.binding_id})
|
||||||
nsxv_db.delete_edge_dhcp_static_binding(
|
nsxv_db.delete_edge_dhcp_static_binding(
|
||||||
context.session, edge_id, mac_address)
|
context.session, edge_id, mac_address)
|
||||||
else:
|
else:
|
||||||
LOG.warning(_LW("Failed to find dhcp binding on edge "
|
LOG.warning("Failed to find dhcp binding on edge "
|
||||||
"%(edge_id)s to DELETE for port "
|
"%(edge_id)s to DELETE for port "
|
||||||
"%(port_id)s"),
|
"%(port_id)s",
|
||||||
{'edge_id': edge_id,
|
{'edge_id': edge_id,
|
||||||
'port_id': port_id})
|
'port_id': port_id})
|
||||||
else:
|
else:
|
||||||
LOG.warning(_LW("Failed to find edge_id to delete dhcp "
|
LOG.warning("Failed to find edge_id to delete dhcp "
|
||||||
"binding for port %(port_id)s"),
|
"binding for port %(port_id)s",
|
||||||
{'port_id': port_id})
|
{'port_id': port_id})
|
||||||
|
|
||||||
@vcns.retry_upon_exception(nsxapi_exc.VcnsApiException, max_delay=10)
|
@vcns.retry_upon_exception(nsxapi_exc.VcnsApiException, max_delay=10)
|
||||||
@ -1993,8 +1993,8 @@ class EdgeManager(object):
|
|||||||
self.plugin.get_port(context, port_id)
|
self.plugin.get_port(context, port_id)
|
||||||
except n_exc.PortNotFound:
|
except n_exc.PortNotFound:
|
||||||
LOG.warning(
|
LOG.warning(
|
||||||
_LW("port %(port_id)s is deleted, so we would pass "
|
"port %(port_id)s is deleted, so we would pass "
|
||||||
"creating dhcp binding on edge %(edge_id)s"),
|
"creating dhcp binding on edge %(edge_id)s",
|
||||||
{'port_id': port_id,
|
{'port_id': port_id,
|
||||||
'edge_id': edge_id})
|
'edge_id': edge_id})
|
||||||
return
|
return
|
||||||
@ -2016,8 +2016,8 @@ class EdgeManager(object):
|
|||||||
nsxv_db.delete_edge_dhcp_static_binding(
|
nsxv_db.delete_edge_dhcp_static_binding(
|
||||||
context.session, edge_id, mac_address)
|
context.session, edge_id, mac_address)
|
||||||
else:
|
else:
|
||||||
LOG.warning(_LW("Failed to create dhcp bindings since dhcp edge "
|
LOG.warning("Failed to create dhcp bindings since dhcp edge "
|
||||||
"for net %s not found at the backend"),
|
"for net %s not found at the backend",
|
||||||
network_id)
|
network_id)
|
||||||
|
|
||||||
def _get_syslog_config_from_flavor(self, context, router_id, flavor_id):
|
def _get_syslog_config_from_flavor(self, context, router_id, flavor_id):
|
||||||
@ -2036,7 +2036,7 @@ class EdgeManager(object):
|
|||||||
|
|
||||||
# If no binding was found, no interface to update - exit
|
# If no binding was found, no interface to update - exit
|
||||||
if not binding:
|
if not binding:
|
||||||
LOG.error(_LE('Edge binding not found for router %s'), router_id)
|
LOG.error('Edge binding not found for router %s', router_id)
|
||||||
return
|
return
|
||||||
|
|
||||||
net_bindings = nsxv_db.get_network_bindings(
|
net_bindings = nsxv_db.get_network_bindings(
|
||||||
@ -2077,13 +2077,13 @@ class EdgeManager(object):
|
|||||||
if address_group['primaryAddress']:
|
if address_group['primaryAddress']:
|
||||||
address_groups.append(address_group)
|
address_groups.append(address_group)
|
||||||
if ipaddr not in addr_list:
|
if ipaddr not in addr_list:
|
||||||
LOG.error(_LE("primary address %s of ext vnic is not "
|
LOG.error("primary address %s of ext vnic is not "
|
||||||
"configured"), ipaddr)
|
"configured", ipaddr)
|
||||||
if secondary:
|
if secondary:
|
||||||
missed_ip_sec = set(secondary) - set(addr_list)
|
missed_ip_sec = set(secondary) - set(addr_list)
|
||||||
if missed_ip_sec:
|
if missed_ip_sec:
|
||||||
LOG.error(_LE("secondary address %s of ext vnic are not "
|
LOG.error("secondary address %s of ext vnic are not "
|
||||||
"configured"), str(missed_ip_sec))
|
"configured", str(missed_ip_sec))
|
||||||
nsxv_manager.update_interface(router_id, binding['edge_id'],
|
nsxv_manager.update_interface(router_id, binding['edge_id'],
|
||||||
vcns_const.EXTERNAL_VNIC_INDEX,
|
vcns_const.EXTERNAL_VNIC_INDEX,
|
||||||
vcns_network_id,
|
vcns_network_id,
|
||||||
@ -2127,7 +2127,7 @@ def delete_lrouter(nsxv_manager, context, router_id, dist=False):
|
|||||||
# delete edge
|
# delete edge
|
||||||
nsxv_manager.delete_edge(context, router_id, edge_id, dist=dist)
|
nsxv_manager.delete_edge(context, router_id, edge_id, dist=dist)
|
||||||
else:
|
else:
|
||||||
LOG.warning(_LW("router binding for router: %s not found"), router_id)
|
LOG.warning("router binding for router: %s not found", router_id)
|
||||||
|
|
||||||
|
|
||||||
def remove_irrelevant_keys_from_edge_request(edge_request):
|
def remove_irrelevant_keys_from_edge_request(edge_request):
|
||||||
@ -2237,7 +2237,7 @@ def get_routes(edge_manager, context, router_id):
|
|||||||
|
|
||||||
binding = nsxv_db.get_nsxv_router_binding(context.session, router_id)
|
binding = nsxv_db.get_nsxv_router_binding(context.session, router_id)
|
||||||
if not binding:
|
if not binding:
|
||||||
LOG.error(_LE('Router binding not found for router %s'), router_id)
|
LOG.error('Router binding not found for router %s', router_id)
|
||||||
return []
|
return []
|
||||||
|
|
||||||
edge_id = binding['edge_id']
|
edge_id = binding['edge_id']
|
||||||
@ -2245,7 +2245,7 @@ def get_routes(edge_manager, context, router_id):
|
|||||||
vnic_bindings = nsxv_db.get_edge_vnic_bindings_by_edge(context.session,
|
vnic_bindings = nsxv_db.get_edge_vnic_bindings_by_edge(context.session,
|
||||||
edge_id)
|
edge_id)
|
||||||
if not vnic_bindings:
|
if not vnic_bindings:
|
||||||
LOG.error(_LE('vNic binding not found for edge %s'), edge_id)
|
LOG.error('vNic binding not found for edge %s', edge_id)
|
||||||
return []
|
return []
|
||||||
|
|
||||||
h, routes = edge_manager.vcns.get_routes(edge_id)
|
h, routes = edge_manager.vcns.get_routes(edge_id)
|
||||||
@ -2265,15 +2265,15 @@ def get_routes(edge_manager, context, router_id):
|
|||||||
def update_routes(edge_manager, context, router_id, routes, nexthop=None):
|
def update_routes(edge_manager, context, router_id, routes, nexthop=None):
|
||||||
binding = nsxv_db.get_nsxv_router_binding(context.session, router_id)
|
binding = nsxv_db.get_nsxv_router_binding(context.session, router_id)
|
||||||
if not binding:
|
if not binding:
|
||||||
LOG.error(_LE('Router binding not found for router %s'), router_id)
|
LOG.error('Router binding not found for router %s', router_id)
|
||||||
return
|
return
|
||||||
|
|
||||||
edge_id = binding['edge_id']
|
edge_id = binding['edge_id']
|
||||||
edge_routes = []
|
edge_routes = []
|
||||||
for route in routes:
|
for route in routes:
|
||||||
if not route.get('network_id'):
|
if not route.get('network_id'):
|
||||||
LOG.warning(_LW("There is no network info for the route %s, so "
|
LOG.warning("There is no network info for the route %s, so "
|
||||||
"the route entry would not be executed!"), route)
|
"the route entry would not be executed!", route)
|
||||||
continue
|
continue
|
||||||
if route.get('external'):
|
if route.get('external'):
|
||||||
edge_routes.append({
|
edge_routes.append({
|
||||||
@ -2289,10 +2289,10 @@ def update_routes(edge_manager, context, router_id, routes, nexthop=None):
|
|||||||
'cidr': route['destination'],
|
'cidr': route['destination'],
|
||||||
'nexthop': route['nexthop']})
|
'nexthop': route['nexthop']})
|
||||||
else:
|
else:
|
||||||
LOG.error(_LE("vnic binding on edge %(edge_id)s for network "
|
LOG.error("vnic binding on edge %(edge_id)s for network "
|
||||||
"%(net_id)s not found, so route: destination: "
|
"%(net_id)s not found, so route: destination: "
|
||||||
"%(dest)s, nexthop: %(nexthop)s can't be "
|
"%(dest)s, nexthop: %(nexthop)s can't be "
|
||||||
"applied!"),
|
"applied!",
|
||||||
{'edge_id': edge_id,
|
{'edge_id': edge_id,
|
||||||
'net_id': route['network_id'],
|
'net_id': route['network_id'],
|
||||||
'dest': route['destination'],
|
'dest': route['destination'],
|
||||||
@ -2408,7 +2408,7 @@ def delete_interface(nsxv_manager, context, router_id, network_id, dist=False):
|
|||||||
# Get edge id
|
# Get edge id
|
||||||
binding = nsxv_db.get_nsxv_router_binding(context.session, router_id)
|
binding = nsxv_db.get_nsxv_router_binding(context.session, router_id)
|
||||||
if not binding:
|
if not binding:
|
||||||
LOG.warning(_LW("Failed to find the router binding for router %s"),
|
LOG.warning("Failed to find the router binding for router %s",
|
||||||
router_id)
|
router_id)
|
||||||
return
|
return
|
||||||
|
|
||||||
@ -2424,8 +2424,8 @@ def delete_interface(nsxv_manager, context, router_id, network_id, dist=False):
|
|||||||
edge_vnic_binding = nsxv_db.get_edge_vnic_binding(
|
edge_vnic_binding = nsxv_db.get_edge_vnic_binding(
|
||||||
context.session, edge_id, network_id)
|
context.session, edge_id, network_id)
|
||||||
if not edge_vnic_binding:
|
if not edge_vnic_binding:
|
||||||
LOG.warning(_LW("Failed to find the network %(net_id)s "
|
LOG.warning("Failed to find the network %(net_id)s "
|
||||||
"corresponding vnic index on edge %(edge_id)s"),
|
"corresponding vnic index on edge %(edge_id)s",
|
||||||
{'net_id': network_id,
|
{'net_id': network_id,
|
||||||
'edge_id': edge_id})
|
'edge_id': edge_id})
|
||||||
return
|
return
|
||||||
@ -2468,7 +2468,7 @@ def update_nat_rules(nsxv_manager, context, router_id, snat, dnat):
|
|||||||
|
|
||||||
nsxv_manager.update_nat_rules(binding['edge_id'], snat, dnat, indices)
|
nsxv_manager.update_nat_rules(binding['edge_id'], snat, dnat, indices)
|
||||||
else:
|
else:
|
||||||
LOG.warning(_LW("Bindings do not exists for %s"), router_id)
|
LOG.warning("Bindings do not exists for %s", router_id)
|
||||||
|
|
||||||
|
|
||||||
def clear_nat_rules(nsxv_manager, context, router_id):
|
def clear_nat_rules(nsxv_manager, context, router_id):
|
||||||
@ -2484,7 +2484,7 @@ def update_firewall(nsxv_manager, context, router_id, firewall,
|
|||||||
nsxv_manager.update_firewall(edge_id, firewall, context,
|
nsxv_manager.update_firewall(edge_id, firewall, context,
|
||||||
allow_external=allow_external)
|
allow_external=allow_external)
|
||||||
else:
|
else:
|
||||||
LOG.warning(_LW("Bindings do not exists for %s"), router_id)
|
LOG.warning("Bindings do not exists for %s", router_id)
|
||||||
|
|
||||||
|
|
||||||
def check_network_in_use_at_backend(context, network_id):
|
def check_network_in_use_at_backend(context, network_id):
|
||||||
@ -2498,15 +2498,15 @@ def check_network_in_use_at_backend(context, network_id):
|
|||||||
context.session, network_id)
|
context.session, network_id)
|
||||||
if not edge_vnic_bindings:
|
if not edge_vnic_bindings:
|
||||||
return
|
return
|
||||||
LOG.warning(_LW('NSXv: network is still in use at the backend'))
|
LOG.warning('NSXv: network is still in use at the backend')
|
||||||
LOG.error(_LE('NSXv: network is still in use at the backend'))
|
LOG.error('NSXv: network is still in use at the backend')
|
||||||
|
|
||||||
|
|
||||||
def default_loglevel_modifier(config, level):
|
def default_loglevel_modifier(config, level):
|
||||||
"""Modify log level settings in edge config bulk (standard syntax)"""
|
"""Modify log level settings in edge config bulk (standard syntax)"""
|
||||||
|
|
||||||
if 'logging' not in config:
|
if 'logging' not in config:
|
||||||
LOG.error(_LE("Logging section missing in configuration"))
|
LOG.error("Logging section missing in configuration")
|
||||||
return False
|
return False
|
||||||
|
|
||||||
enable = True
|
enable = True
|
||||||
@ -2523,7 +2523,7 @@ def routing_loglevel_modifier(config, level):
|
|||||||
"""Modify log level in routing global settings"""
|
"""Modify log level in routing global settings"""
|
||||||
|
|
||||||
if 'routingGlobalConfig' not in config:
|
if 'routingGlobalConfig' not in config:
|
||||||
LOG.error(_LE("routingGlobalConfig section missing in config"))
|
LOG.error("routingGlobalConfig section missing in config")
|
||||||
return False
|
return False
|
||||||
|
|
||||||
return default_loglevel_modifier(config['routingGlobalConfig'],
|
return default_loglevel_modifier(config['routingGlobalConfig'],
|
||||||
@ -2547,11 +2547,11 @@ def get_loglevel_modifier(module, level):
|
|||||||
def update_edge_loglevel(vcns, edge_id, module, level):
|
def update_edge_loglevel(vcns, edge_id, module, level):
|
||||||
"""Update loglevel on edge for specified module"""
|
"""Update loglevel on edge for specified module"""
|
||||||
if module not in SUPPORTED_EDGE_LOG_MODULES:
|
if module not in SUPPORTED_EDGE_LOG_MODULES:
|
||||||
LOG.error(_LE("Unrecognized logging module %s - ignored"), module)
|
LOG.error("Unrecognized logging module %s - ignored", module)
|
||||||
return
|
return
|
||||||
|
|
||||||
if level not in SUPPORTED_EDGE_LOG_LEVELS:
|
if level not in SUPPORTED_EDGE_LOG_LEVELS:
|
||||||
LOG.error(_LE("Unrecognized log level %s - ignored"), level)
|
LOG.error("Unrecognized log level %s - ignored", level)
|
                 return
 
     vcns.update_edge_config_with_modifier(edge_id, module,
@@ -2570,22 +2570,22 @@ def update_edge_host_groups(vcns, edge_id, dvs, availability_zone,
             availability_zone.resource_pool)
         for vm in vms:
             if vm in configured_vms:
-                LOG.info(_LI('Edge %s already configured'), edge_id)
+                LOG.info('Edge %s already configured', edge_id)
                 return
     # Ensure random distribution of the VMs
     if availability_zone.ha_placement_random:
         random.shuffle(vms)
     try:
-        LOG.info(_LI('Create DRS groups for '
-                     '%(vms)s on edge %(edge_id)s'),
+        LOG.info('Create DRS groups for '
+                 '%(vms)s on edge %(edge_id)s',
                  {'vms': vms,
                   'edge_id': edge_id})
         dvs.update_cluster_edge_failover(
             availability_zone.resource_pool,
             vms, edge_id, availability_zone.edge_host_groups)
     except Exception as e:
-        LOG.error(_LE('Unable to create DRS groups for '
-                      '%(vms)s on edge %(edge_id)s. Error: %(e)s'),
+        LOG.error('Unable to create DRS groups for '
+                  '%(vms)s on edge %(edge_id)s. Error: %(e)s',
                   {'vms': vms,
                    'edge_id': edge_id,
                    'e': e})
@@ -2593,12 +2593,12 @@ def update_edge_host_groups(vcns, edge_id, dvs, availability_zone,
 
 def clean_host_groups(dvs, availability_zone):
     try:
-        LOG.info(_LI('Cleaning up host groups for AZ %s'),
+        LOG.info('Cleaning up host groups for AZ %s',
                  availability_zone.name)
         dvs.cluster_host_group_cleanup(
             availability_zone.resource_pool)
     except Exception as e:
-        LOG.error(_LE('Unable to cleanup. Error: %s'), e)
+        LOG.error('Unable to cleanup. Error: %s', e)
 
 
 class NsxVCallbacks(object):
@@ -2621,7 +2621,7 @@ class NsxVCallbacks(object):
                 router_db = self.plugin._get_router(context, router_id)
             except l3.RouterNotFound:
                 # Router might have been deleted before deploy finished
-                LOG.warning(_LW("Router %s not found"), name)
+                LOG.warning("Router %s not found", name)
 
         if deploy_successful:
             metadata_proxy_handler = self.plugin.get_metadata_proxy_handler(
@@ -2652,7 +2652,7 @@ class NsxVCallbacks(object):
                 update_edge_host_groups(self.plugin.nsx_v.vcns, edge_id,
                                         self._vcm, availability_zone)
         else:
-            LOG.error(_LE("Failed to deploy Edge for router %s"), name)
+            LOG.error("Failed to deploy Edge for router %s", name)
             if router_db:
                 router_db['status'] = plugin_const.ERROR
             nsxv_db.update_nsxv_router_binding(
@@ -2670,8 +2670,8 @@ class NsxVCallbacks(object):
                  {'edge_id': edge_id,
                   'router_id': router_id})
         else:
-            LOG.error(_LE("Failed to update %(edge_id)s for router "
-                          "%(router_id)s"),
+            LOG.error("Failed to update %(edge_id)s for router "
+                      "%(router_id)s",
                       {'edge_id': edge_id,
                        'router_id': router_id})
             admin_ctx = q_context.get_admin_context()
@@ -2686,7 +2686,7 @@ class NsxVCallbacks(object):
                 router_db['status'] = plugin_const.ERROR
         except l3.RouterNotFound:
             # Router might have been deleted before deploy finished
-            LOG.warning(_LW("Router %s not found"), router_id)
+            LOG.warning("Router %s not found", router_id)
 
     def interface_update_result(self, task):
         LOG.debug("interface_update_result %d", task.status)
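
For reference, every hunk in this change applies the same mechanical pattern: the _LI/_LE/_LW wrapper is dropped and the closing parenthesis moves from the format string to the end of the call, while the interpolation arguments stay outside the string. A minimal runnable sketch of the before/after (the logger name, message, and edge_id value are hypothetical):

    import logging

    LOG = logging.getLogger(__name__)
    edge_id = 'edge-1'  # placeholder value for illustration

    # Before (with a translation marker, no longer used):
    #     LOG.error(_LE('Unable to configure edge %s'), edge_id)
    # After: a plain format string; the argument is still passed
    # separately, so interpolation stays deferred until the record
    # is actually emitted.
    LOG.error('Unable to configure edge %s', edge_id)
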
@@ -24,7 +24,7 @@ from oslo_log import log as logging
 from oslo_service import loopingcall
 import six
 
-from vmware_nsx._i18n import _, _LE, _LI
+from vmware_nsx._i18n import _
 from vmware_nsx.plugins.nsx_v.vshield.tasks import constants
 
 DEFAULT_INTERVAL = 1000
@@ -96,8 +96,8 @@ class Task(object):
             try:
                 func(self)
             except Exception:
-                LOG.exception(_LE("Task %(task)s encountered exception in "
-                                  "%(func)s at state %(state)s"),
+                LOG.exception("Task %(task)s encountered exception in "
+                              "%(func)s at state %(state)s",
                               {'task': str(self),
                                'func': str(func),
                                'state': state})
@@ -188,8 +188,8 @@ class TaskManager(object):
         try:
             status = task._execute_callback(task)
         except Exception:
-            LOG.exception(_LE("Task %(task)s encountered exception in "
-                              "%(cb)s"),
+            LOG.exception("Task %(task)s encountered exception in "
+                          "%(cb)s",
                           {'task': str(task),
                            'cb': str(task._execute_callback)})
             status = constants.TaskStatus.ERROR
@@ -207,8 +207,8 @@ class TaskManager(object):
         try:
             task._result_callback(task)
         except Exception:
-            LOG.exception(_LE("Task %(task)s encountered exception in "
-                              "%(cb)s"),
+            LOG.exception("Task %(task)s encountered exception in "
+                          "%(cb)s",
                           {'task': str(task),
                            'cb': str(task._result_callback)})
         LOG.debug("Task %(task)s return %(status)s",
@@ -229,8 +229,8 @@ class TaskManager(object):
         try:
             status = task._status_callback(task)
         except Exception:
-            LOG.exception(_LE("Task %(task)s encountered exception in "
-                              "%(cb)s"),
+            LOG.exception("Task %(task)s encountered exception in "
+                          "%(cb)s",
                           {'task': str(task),
                            'cb': str(task._status_callback)})
             status = constants.TaskStatus.ERROR
@@ -295,7 +295,7 @@ class TaskManager(object):
             if self._stopped:
                 # Gracefully terminate this thread if the _stopped
                 # attribute was set to true
-                LOG.info(_LI("Stopping TaskManager"))
+                LOG.info("Stopping TaskManager")
                 break
 
             # get a task from queue, or timeout for periodic status check
@@ -320,8 +320,8 @@ class TaskManager(object):
                     else:
                         self._enqueue(task)
             except Exception:
-                LOG.exception(_LE("TaskManager terminating because "
-                                  "of an exception"))
+                LOG.exception("TaskManager terminating because "
+                              "of an exception")
                 break
 
     def add(self, task):
@@ -342,7 +342,7 @@ class TaskManager(object):
         if self._monitor_busy:
             self._monitor.wait()
         self._abort()
-        LOG.info(_LI("TaskManager terminated"))
+        LOG.info("TaskManager terminated")
 
     def has_pending_task(self):
         if self._tasks_queue or self._tasks or self._main_thread_exec_task:
@@ -374,7 +374,7 @@ class TaskManager(object):
             try:
                 self._check_pending_tasks()
             except Exception:
-                LOG.exception(_LE("Exception in _check_pending_tasks"))
+                LOG.exception("Exception in _check_pending_tasks")
             self._monitor_busy = False
 
         if self._thread is not None:
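
The task-manager hunks above all touch LOG.exception, which logs at ERROR level and appends the traceback of the active exception automatically, so dropping the marker changes only the message argument. A minimal sketch, assuming a stdlib-style logger and a hypothetical failing callback:

    import logging

    LOG = logging.getLogger(__name__)

    def run_task():
        raise RuntimeError('boom')  # stand-in for a failing task callback

    try:
        run_task()
    except Exception:
        # No exception object is passed: LOG.exception picks up the
        # current traceback from the active except block.
        LOG.exception("Task %(task)s encountered exception in %(func)s",
                      {'task': 'demo-task', 'func': 'run_task'})
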
@@ -20,7 +20,6 @@ from oslo_serialization import jsonutils
 import six
 import xml.etree.ElementTree as et
 
-from vmware_nsx._i18n import _LE
 from vmware_nsx.common import nsxv_constants
 from vmware_nsx.common import utils
 from vmware_nsx.plugins.nsx_v.vshield.common import constants
@@ -911,7 +910,7 @@ class Vcns(object):
                 self._nsx_version = self._get_version()
             except Exception as e:
                 # Versions prior to 6.2.0 do not support the above API
-                LOG.error(_LE("Unable to get NSX version. Exception: %s"), e)
+                LOG.error("Unable to get NSX version. Exception: %s", e)
                 # Minimum supported version is 6.1
                 self._nsx_version = '6.1'
         return self._nsx_version
@@ -20,7 +20,6 @@ import logging
 
 from oslo_config import cfg
 
-from vmware_nsx._i18n import _LE
 from vmware_nsx.db import db as nsx_db
 
 LOG = logging.getLogger(__name__)
@@ -82,9 +81,9 @@ class DbCertificateStorageDriver(object):
         except fernet.InvalidToken:
             # unable to decrypt - probably due to change of password
             # cert and PK are useless, need to delete them
-            LOG.error(_LE("Unable to decrypt private key, possibly due "
+            LOG.error("Unable to decrypt private key, possibly due "
                       "to change of password. Certificate needs to be "
-                          "regenerated"))
+                      "regenerated")
             self.delete_cert(purpose)
             return None, None
 
@@ -76,7 +76,7 @@ from oslo_utils import importutils
 from oslo_utils import uuidutils
 from sqlalchemy import exc as sql_exc
 
-from vmware_nsx._i18n import _, _LE, _LI, _LW
+from vmware_nsx._i18n import _
 from vmware_nsx.api_replay import utils as api_replay_utils
 from vmware_nsx.common import availability_zones as nsx_com_az
 from vmware_nsx.common import config  # noqa
@@ -180,7 +180,7 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
         super(NsxV3Plugin, self).__init__()
         # Bind the dummy L3 notifications
         self.l3_rpc_notifier = l3_rpc_agent_api.L3NotifyAPI()
-        LOG.info(_LI("Starting NsxV3Plugin"))
+        LOG.info("Starting NsxV3Plugin")
         self._extension_manager.initialize()
         self.supported_extension_aliases.extend(
             self._extension_manager.extension_aliases())
@@ -193,7 +193,7 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
             resources.PROCESS, events.AFTER_INIT)
 
         self._nsx_version = self.nsxlib.get_version()
-        LOG.info(_LI("NSX Version: %s"), self._nsx_version)
+        LOG.info("NSX Version: %s", self._nsx_version)
         self._nsx_client = self.nsxlib.client
 
         self.cfg_group = 'nsx_v3'  # group name for nsx_v3 section in nsx.ini
@@ -297,8 +297,8 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
             # Only expose the extension if it is supported
             self.supported_extension_aliases.append('mac-learning')
         except Exception as e:
-            LOG.warning(_LW("Unable to initialize NSX v3 MAC Learning "
-                            "profile: %(name)s. Reason: %(reason)s"),
+            LOG.warning("Unable to initialize NSX v3 MAC Learning "
+                        "profile: %(name)s. Reason: %(reason)s",
                         {'name': NSX_V3_MAC_LEARNING_PROFILE_NAME,
                          'reason': e})
 
@@ -474,8 +474,8 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
                     section_id, logging=log_all_rules)
             except nsx_lib_exc.ManagerError:
                 with excutils.save_and_reraise_exception():
-                    LOG.error(_LE("Failed to update firewall rule logging "
-                                  "for rule in section %s"), section_id)
+                    LOG.error("Failed to update firewall rule logging "
+                              "for rule in section %s", section_id)
 
         utils.spawn_n(process_security_group_logging)
 
@@ -523,8 +523,8 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
                     self._nsx_client)
             except nsx_lib_exc.ManagerError:
                 with excutils.save_and_reraise_exception():
-                    LOG.error(_LE("Unable to retrieve DHCP Profile %s, "
-                                  "native DHCP service is not supported"),
+                    LOG.error("Unable to retrieve DHCP Profile %s, "
+                              "native DHCP service is not supported",
                               az._native_dhcp_profile_uuid)
 
     def _init_native_metadata(self):
@@ -534,8 +534,8 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
                     az._native_md_proxy_uuid)
             except nsx_lib_exc.ManagerError:
                 with excutils.save_and_reraise_exception():
-                    LOG.error(_LE("Unable to retrieve Metadata Proxy %s, "
-                                  "native metadata service is not supported"),
+                    LOG.error("Unable to retrieve Metadata Proxy %s, "
+                              "native metadata service is not supported",
                               az._native_md_proxy_uuid)
 
     def _setup_rpc(self):
@@ -843,7 +843,7 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
         except Exception:
             with excutils.save_and_reraise_exception():
                 # Undo creation on the backend
-                LOG.exception(_LE('Failed to create network %s'),
+                LOG.exception('Failed to create network %s',
                               created_net['id'])
                 if net_type != utils.NetworkTypes.L3_EXT:
                     self.nsxlib.logical_switch.delete(created_net['id'])
@@ -978,8 +978,8 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
             # the switch when the switch's admin state changes. Do not
             # update the admin state of the ports in neutron either.
         except nsx_lib_exc.ManagerError:
-            LOG.exception(_LE("Unable to update NSX backend, rolling "
-                              "back changes on neutron"))
+            LOG.exception("Unable to update NSX backend, rolling "
+                          "back changes on neutron")
            with excutils.save_and_reraise_exception():
                super(NsxV3Plugin, self).update_network(
                    context, id, {'network': original_net})
@@ -1067,8 +1067,8 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
                       {'port': nsx_port['id'], 'network': network['id']})
         except nsx_lib_exc.ManagerError:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE("Unable to create logical DHCP server for "
-                              "network %s"), network['id'])
+                LOG.error("Unable to create logical DHCP server for "
+                          "network %s", network['id'])
                 if dhcp_server:
                     self._dhcp_server.delete(dhcp_server['id'])
                 super(NsxV3Plugin, self).delete_port(
@@ -1085,8 +1085,8 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
                 nsxlib_consts.SERVICE_DHCP, dhcp_server['id'])
         except (db_exc.DBError, sql_exc.TimeoutError):
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE("Failed to create mapping for DHCP port %s,"
-                              "deleting port and logical DHCP server"),
+                LOG.error("Failed to create mapping for DHCP port %s,"
+                          "deleting port and logical DHCP server",
                           neutron_port['id'])
                 self._dhcp_server.delete(dhcp_server['id'])
                 self._cleanup_port(context, neutron_port['id'], nsx_port['id'])
@@ -1096,8 +1096,8 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
             for port_data in existing_ports:
                 self._add_dhcp_binding(context, port_data)
         except Exception:
-            LOG.error(_LE('Unable to create DHCP bindings for existing ports '
-                          'on subnet %s'), subnet['id'])
+            LOG.error('Unable to create DHCP bindings for existing ports '
+                      'on subnet %s', subnet['id'])
 
     def _disable_native_dhcp(self, context, network_id):
         # Disable native DHCP service on the backend for this network.
@@ -1113,12 +1113,12 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
                     self.delete_port(context, dhcp_service['port_id'])
                 except Exception:
                     # This could happen when the port has been manually deleted.
-                    LOG.error(_LE("Failed to delete DHCP port %(port)s for "
-                                  "network %(network)s"),
+                    LOG.error("Failed to delete DHCP port %(port)s for "
+                              "network %(network)s",
                               {'port': dhcp_service['port_id'],
                                'network': network_id})
             else:
-                LOG.error(_LE("DHCP port is not configured for network %s"),
+                LOG.error("DHCP port is not configured for network %s",
                           network_id)
 
         try:
@@ -1129,8 +1129,8 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
                        'network': network_id})
         except nsx_lib_exc.ManagerError:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE("Unable to delete logical DHCP server %(server)s"
-                              "for network %(network)s"),
+                LOG.error("Unable to delete logical DHCP server %(server)s "
+                          "for network %(network)s",
                           {'server': dhcp_service['nsx_service_id'],
                            'network': network_id})
         try:
@@ -1142,8 +1142,8 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
                 context.session, dhcp_service['nsx_service_id'])
         except db_exc.DBError:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE("Unable to delete DHCP server mapping for "
-                              "network %s"), network_id)
+                LOG.error("Unable to delete DHCP server mapping for "
+                          "network %s", network_id)
 
     def _validate_address_space(self, subnet):
         cidr = subnet.get('cidr')
@@ -1199,8 +1199,8 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
                     # is already empty.
                     context.session.rollback()
                     with excutils.save_and_reraise_exception():
-                        LOG.error(_LE("An exception occurred while creating "
-                                      "the %(resource)s:%(item)s"),
+                        LOG.error("An exception occurred while creating "
+                                  "the %(resource)s:%(item)s",
                                   {'resource': resource, 'item': item})
         return objects
 
@@ -1222,14 +1222,14 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
             try:
                 self._port_client.delete(dhcp_info['nsx_port_id'])
             except Exception as e:
-                LOG.error(_LE("Failed to delete logical port %(id)s "
-                              "during rollback. Exception: %(e)s"),
+                LOG.error("Failed to delete logical port %(id)s "
+                          "during rollback. Exception: %(e)s",
                           {'id': dhcp_info['nsx_port_id'], 'e': e})
             try:
                 self._dhcp_server.delete(dhcp_info['nsx_service_id'])
             except Exception as e:
-                LOG.error(_LE("Failed to delete logical DHCP server %(id)s "
-                              "during rollback. Exception: %(e)s"),
+                LOG.error("Failed to delete logical DHCP server %(id)s "
+                          "during rollback. Exception: %(e)s",
                           {'id': dhcp_info['nsx_service_id'], 'e': e})
 
     def create_subnet_bulk(self, context, subnets):
@@ -1303,8 +1303,8 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
                     try:
                         self._disable_native_dhcp(context, network['id'])
                     except Exception as e:
-                        LOG.error(_LE("Failed to disable native DHCP for"
-                                      "network %(id)s. Exception: %(e)s"),
+                        LOG.error("Failed to disable native DHCP for"
+                                  "network %(id)s. Exception: %(e)s",
                                   {'id': network['id'], 'e': e})
                     super(NsxV3Plugin, self).delete_subnet(
                         context, subnet_id)
@@ -1381,8 +1381,8 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
             except nsx_lib_exc.ManagerError:
                 with excutils.save_and_reraise_exception():
                     LOG.error(
-                        _LE("Unable to update logical DHCP server "
-                            "%(server)s for network %(network)s"),
+                        "Unable to update logical DHCP server "
+                        "%(server)s for network %(network)s",
                         {'server': dhcp_service['nsx_service_id'],
                          'network': orig_subnet['network_id']})
             if 'gateway_ip' in kwargs:
@@ -1637,7 +1637,7 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
         except nsx_lib_exc.ManagerError as inst:
             # we may fail if the QoS is not supported for this port
             # (for example - transport zone with KVM)
-            LOG.exception(_LE("Unable to create port on the backend: %s"),
+            LOG.exception("Unable to create port on the backend: %s",
                           inst)
             msg = _("Unable to create port on the backend")
             raise nsx_exc.NsxPluginException(err_msg=msg)
@@ -1748,9 +1748,9 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
                     fixed_ip['ip_address'], dhcp_service['nsx_service_id'],
                     binding['id'])
             except (db_exc.DBError, sql_exc.TimeoutError):
-                LOG.error(_LE("Failed to add mapping of DHCP binding "
+                LOG.error("Failed to add mapping of DHCP binding "
                           "%(binding)s for port %(port)s, deleting"
-                              "DHCP binding on server"),
+                          "DHCP binding on server",
                           {'binding': binding['id'], 'port': port['id']})
                 self._delete_dhcp_binding_on_server(context, binding)
 
@@ -1825,10 +1825,10 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
                 return binding
         except nsx_lib_exc.ManagerError:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE("Unable to create static binding (mac: %(mac)s, "
+                LOG.error("Unable to create static binding (mac: %(mac)s, "
                           "ip: %(ip)s, gateway: %(gateway)s, options: "
                           "%(options)s) for port %(port)s on logical DHCP "
-                              "server %(server)s"),
+                          "server %(server)s",
                           {'mac': port['mac_address'], 'ip': ip,
                            'gateway': gateway_ip, 'options': options,
                            'port': port['id'],
@@ -1845,8 +1845,8 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
                 context.session, binding['port_id'],
                 binding['nsx_binding_id'])
         except db_exc.DBError:
-            LOG.error(_LE("Unable to delete mapping of DHCP binding "
-                          "%(binding)s for port %(port)s"),
+            LOG.error("Unable to delete mapping of DHCP binding "
+                      "%(binding)s for port %(port)s",
                       {'binding': binding['nsx_binding_id'],
                        'port': binding['port_id']})
 
@@ -1860,8 +1860,8 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
                        'server': binding['nsx_service_id']})
         except nsx_lib_exc.ManagerError:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE("Unable to delete static binding for port "
-                              "%(port)s) on logical DHCP server %(server)s"),
+                LOG.error("Unable to delete static binding for port "
+                          "%(port)s) on logical DHCP server %(server)s",
                           {'port': binding['port_id'],
                            'server': binding['nsx_service_id']})
 
@@ -1923,8 +1923,8 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
                                'server': dhcp_service['nsx_service_id']})
                 except nsx_lib_exc.ManagerError:
                     with excutils.save_and_reraise_exception():
-                        LOG.error(_LE("Unable to update IP %(ip)s for logical "
-                                      "DHCP server %(server)s"),
+                        LOG.error("Unable to update IP %(ip)s for logical "
+                                  "DHCP server %(server)s",
                                   {'ip': new_ip,
                                    'server': dhcp_service['nsx_service_id']})
         elif utils.is_port_dhcp_configurable(old_port):
@@ -2001,9 +2001,9 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
                        'server': binding['nsx_service_id']})
         except nsx_lib_exc.ManagerError:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE("Unable to update static binding (mac: %(mac)s, "
+                LOG.error("Unable to update static binding (mac: %(mac)s, "
                           "ip: %(ip)s, gateway: %(gateway)s) for port "
-                              "%(port)s on logical DHCP server %(server)s"),
+                          "%(port)s on logical DHCP server %(server)s",
                           {'mac': mac, 'ip': ip, 'gateway': gateway_ip,
                            'port': binding['port_id'],
                            'server': binding['nsx_service_id']})
@@ -2079,8 +2079,8 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
                 context, port_data, l2gw_port_check, is_psec_on)
         except Exception as e:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE('Failed to create port %(id)s on NSX '
-                              'backend. Exception: %(e)s'),
+                LOG.error('Failed to create port %(id)s on NSX '
+                          'backend. Exception: %(e)s',
                           {'id': neutron_db['id'], 'e': e})
                 self._cleanup_port(context, neutron_db['id'], None)
 
@@ -2348,7 +2348,7 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
        except nsx_lib_exc.ManagerError as inst:
            # we may fail if the QoS is not supported for this port
            # (for example - transport zone with KVM)
-            LOG.exception(_LE("Unable to update port on the backend: %s"),
+            LOG.exception("Unable to update port on the backend: %s",
                          inst)
            msg = _("Unable to update port on the backend")
            raise nsx_exc.NsxPluginException(err_msg=msg)
@@ -2466,8 +2466,8 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
                nsx_lib_exc.SecurityGroupMaximumCapacityReached) as e:
            # In case if there is a failure on NSX-v3 backend, rollback the
            # previous update operation on neutron side.
-            LOG.exception(_LE("Unable to update NSX backend, rolling back "
-                              "changes on neutron"))
+            LOG.exception("Unable to update NSX backend, rolling back "
+                          "changes on neutron")
            with excutils.save_and_reraise_exception(reraise=False):
                with context.session.begin(subtransactions=True):
                    super(NsxV3Plugin, self).update_port(
@@ -2718,8 +2718,8 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
                tags=tags)
        except nsx_lib_exc.ManagerError:
            with excutils.save_and_reraise_exception():
-                LOG.error(_LE("Unable to create logical router for "
-                              "neutron router %s"), router['id'])
+                LOG.error("Unable to create logical router for "
+                          "neutron router %s", router['id'])
                self.delete_router(context, router['id'])
 
        try:
@@ -2727,8 +2727,8 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
                context.session, router['id'], result['id'])
        except db_exc.DBError:
            with excutils.save_and_reraise_exception():
-                LOG.error(_LE("Unable to create router mapping for "
-                              "router %s"), router['id'])
+                LOG.error("Unable to create router mapping for "
+                          "router %s", router['id'])
                self.delete_router(context, router['id'])
 
        if gw_info and gw_info != const.ATTR_NOT_SPECIFIED:
@@ -2736,13 +2736,13 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
                self._update_router_gw_info(context, router['id'], gw_info)
            except (db_exc.DBError, nsx_lib_exc.ManagerError):
                with excutils.save_and_reraise_exception():
-                    LOG.error(_LE("Failed to set gateway info for router "
-                                  "being created: %s - removing router"),
+                    LOG.error("Failed to set gateway info for router "
+                              "being created: %s - removing router",
                              router['id'])
                    self.delete_router(context, router['id'])
-                    LOG.info(_LI("Create router failed while setting external "
+                    LOG.info("Create router failed while setting external "
                             "gateway. Router:%s has been removed from "
-                                 "DB and backend"),
+                             "DB and backend",
                             router['id'])
        return self.get_router(context, router['id'])
 
@@ -2773,9 +2773,9 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
            # removed from the neutron DB. Take corrective steps to ensure the
            # resulting zombie object does not forward any traffic and is
            # eventually removed.
-            LOG.warning(_LW("Backend router deletion for neutron router %s "
+            LOG.warning("Backend router deletion for neutron router %s "
                         "failed. The object was however removed from the "
-                            "Neutron database"), router_id)
+                        "Neutron database", router_id)
 
        return ret_val
 
@@ -2861,8 +2861,8 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
                    self._port_client.update(nsx_port_id, None,
                                             name=name)
                except Exception as e:
-                    LOG.error(_LE("Unable to update port %(port_id)s. "
-                                  "Reason: %(e)s"),
+                    LOG.error("Unable to update port %(port_id)s. "
+                              "Reason: %(e)s",
                              {'port_id': nsx_port_id,
                               'e': e})
        if 'description' in router_data:
@@ -3019,8 +3019,8 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
                interface=info)
        except Exception:
            with excutils.save_and_reraise_exception():
-                LOG.error(_LE("Neutron failed to add_router_interface on "
-                              "router %s, and would try to rollback."),
+                LOG.error("Neutron failed to add_router_interface on "
+                          "router %s, and would try to rollback.",
                          router_id)
                self.remove_router_interface(
                    context, router_id, interface_info)
@@ -3087,8 +3087,8 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
            else:
                self._router_port_client.delete_by_lswitch_id(nsx_net_id)
        except nsx_lib_exc.ResourceNotFound:
-            LOG.error(_LE("router port on router %(router_id)s for net "
-                          "%(net_id)s not found at the backend"),
+            LOG.error("router port on router %(router_id)s for net "
+                      "%(net_id)s not found at the backend",
                      {'router_id': router_id,
                       'net_id': subnet['network_id']})
        info = super(NsxV3Plugin, self).remove_router_interface(
@@ -3146,9 +3146,9 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
                nsx_router_id, fip['floating_ip_address'],
                fip['fixed_ip_address'])
        except nsx_lib_exc.ResourceNotFound:
-            LOG.warning(_LW("Backend NAT rules for fip: %(fip_id)s "
+            LOG.warning("Backend NAT rules for fip: %(fip_id)s "
                        "(ext_ip: %(ext_ip)s int_ip: %(int_ip)s) "
-                            "not found"),
+                        "not found",
                        {'fip_id': fip_id,
                         'ext_ip': fip['floating_ip_address'],
                         'int_ip': fip['fixed_ip_address']})
@@ -3186,9 +3186,9 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
                old_nsx_router_id, old_fip['floating_ip_address'],
                old_fip['fixed_ip_address'])
        except nsx_lib_exc.ResourceNotFound:
-            LOG.warning(_LW("Backend NAT rules for fip: %(fip_id)s "
+            LOG.warning("Backend NAT rules for fip: %(fip_id)s "
                        "(ext_ip: %(ext_ip)s int_ip: %(int_ip)s) "
-                            "not found"),
+                        "not found",
                        {'fip_id': old_fip['id'],
                         'ext_ip': old_fip['floating_ip_address'],
                         'int_ip': old_fip['fixed_ip_address']})
@@ -3229,9 +3229,9 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
                nsx_router_id, fip_db.floating_ip_address,
                fip_db.fixed_ip_address)
        except nsx_lib_exc.ResourceNotFound:
-            LOG.warning(_LW("Backend NAT rules for fip: %(fip_id)s "
+            LOG.warning("Backend NAT rules for fip: %(fip_id)s "
                        "(ext_ip: %(ext_ip)s int_ip: %(int_ip)s) "
-                            "not found"),
+                        "not found",
                        {'fip_id': fip_db.id,
                         'ext_ip': fip_db.floating_ip_address,
                         'int_ip': fip_db.fixed_ip_address})
@@ -3353,8 +3353,8 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
                default_sg)
        except nsx_lib_exc.ManagerError:
            with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("Unable to create security-group on the "
-                                  "backend."))
+                LOG.exception("Unable to create security-group on the "
+                              "backend.")
                if ns_group:
                    self.nsxlib.ns_group.delete(ns_group['id'])
        except Exception:
@@ -3385,9 +3385,9 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
                self.save_security_group_rule_mappings(context, rules['rules'])
        except nsx_lib_exc.ManagerError:
            with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("Failed to create backend firewall rules "
+                LOG.exception("Failed to create backend firewall rules "
                              "for security-group %(name)s (%(id)s), "
-                                  "rolling back changes."), secgroup_db)
+                              "rolling back changes.", secgroup_db)
                # default security group deletion requires admin context
                if default_sg:
                    context = context.elevated()
@@ -3415,9 +3415,9 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
                cfg.CONF.nsx_v3.log_security_groups_allowed_traffic)
        except nsx_lib_exc.ManagerError:
            with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("Failed to update security-group %(name)s "
+                LOG.exception("Failed to update security-group %(name)s "
                              "(%(id)s), rolling back changes in "
-                                  "Neutron."), orig_secgroup)
+                              "Neutron.", orig_secgroup)
                super(NsxV3Plugin, self).update_security_group(
                    context, id, {'security_group': orig_secgroup})
 
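
Note the split the plugin hunks above preserve: operator-facing log messages lose their translation markers, while text raised to API callers keeps the _() translator, as in the port-creation failure path. A condensed, runnable sketch of that convention (the translator and the failure are stand-ins):

    import logging

    LOG = logging.getLogger(__name__)

    def _(msg):
        return msg  # stand-in for the oslo_i18n translator

    try:
        raise ValueError('backend rejected request')  # simulated failure
    except ValueError as inst:
        # Operator-facing log: plain, untranslated string.
        LOG.exception("Unable to create port on the backend: %s", inst)
        # User-facing error text keeps the _() marker for translation;
        # the plugin would then wrap it in an exception, e.g.
        # raise nsx_exc.NsxPluginException(err_msg=msg).
        msg = _("Unable to create port on the backend")
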
@@ -20,7 +20,6 @@ from oslo_log import log as logging
 from neutron import version as n_version
 from neutron_lib import context as q_context
 
-from vmware_nsx._i18n import _LE, _LW
 from vmware_nsx.common import exceptions as nsx_exc
 from vmware_nsx.plugins.nsx_v3 import cert_utils
 from vmware_nsxlib import v3
@@ -50,11 +49,11 @@ class DbCertProvider(client_cert.ClientCertProvider):
             return
 
         if expires_in_days < 0:
-            LOG.error(_LE("Client certificate has expired %d days ago."),
+            LOG.error("Client certificate has expired %d days ago.",
                       expires_in_days * -1)
         else:
-            LOG.warning(_LW("Client certificate expires in %d days. "
-                            "Once expired, service will become unavailable."),
+            LOG.warning("Client certificate expires in %d days. "
+                        "Once expired, service will become unavailable.",
                         expires_in_days)
 
     def __enter__(self):
@@ -28,7 +28,7 @@ from oslo_config import cfg
 from oslo_log import helpers as log_helpers
 from oslo_log import log as logging
 
-from vmware_nsx._i18n import _, _LE
+from vmware_nsx._i18n import _
 from vmware_nsx.common import config  # noqa
 from vmware_nsx.common import exceptions as nsx_exc
 from vmware_nsx.common import locking
@@ -332,8 +332,8 @@ class NsxvFlowClassifierDriver(fc_driver.FlowClassifierDriverBase):
                     break
 
         if redirect_rule is None:
-            LOG.error(_LE("Failed to delete redirect rule %s: "
-                          "Could not find rule on backed"),
+            LOG.error("Failed to delete redirect rule %s: "
+                      "Could not find rule on backed",
                       flow_classifier_id)
             # should not fail the deletion
         else:
@@ -26,7 +26,7 @@ from neutron.ipam import requests as ipam_req
 from neutron_lib.api.definitions import provider_net as pnet
 from neutron_lib.api import validators
 
-from vmware_nsx._i18n import _, _LE
+from vmware_nsx._i18n import _
 from vmware_nsx.plugins.nsx_v.vshield.common import constants
 from vmware_nsx.plugins.nsx_v.vshield.common import exceptions as vc_exc
 from vmware_nsx.services.ipam.common import driver as common
@@ -113,7 +113,7 @@ class NsxvIpamDriver(common.NsxAbstractIpamDriver, NsxVIpamBase):
         try:
             self._vcns.delete_ipam_ip_pool(nsx_pool_id)
         except vc_exc.VcnsApiException as e:
-            LOG.error(_LE("Failed to delete IPAM from backend: %s"), e)
+            LOG.error("Failed to delete IPAM from backend: %s", e)
             # Continue anyway, since this subnet was already removed
 
     def update_backend_pool(self, subnet_request):
@@ -132,7 +132,7 @@ class NsxvIpamSubnet(common.NsxAbstractIpamSubnet, NsxVIpamBase):
             desc = et.fromstring(e.response)
             return int(desc.find('errorCode').text)
         except Exception:
-            LOG.error(_LE('IPAM pool: Error code not present. %s'),
+            LOG.error('IPAM pool: Error code not present. %s',
                       e.response)
 
     def backend_allocate(self, address_request):
@@ -169,8 +169,8 @@ class NsxvIpamSubnet(common.NsxAbstractIpamSubnet, NsxVIpamBase):
         try:
             self._vcns.release_ipam_ip_to_pool(self._nsx_pool_id, address)
         except vc_exc.VcnsApiException as e:
-            LOG.error(_LE("NSX IPAM failed to free ip %(ip)s of subnet %(id)s:"
-                          " %(e)s"),
+            LOG.error("NSX IPAM failed to free ip %(ip)s of subnet %(id)s:"
+                      " %(e)s",
                       {'e': e.response,
                        'ip': address,
                        'id': self._subnet_id})
@@ -21,7 +21,7 @@ from oslo_log import log as logging
 from neutron.ipam import exceptions as ipam_exc
 from neutron.ipam import requests as ipam_req
 
-from vmware_nsx._i18n import _, _LE, _LI, _LW
+from vmware_nsx._i18n import _
 from vmware_nsx.services.ipam.common import driver as common
 from vmware_nsxlib.v3 import exceptions as nsx_lib_exc
 from vmware_nsxlib.v3 import nsx_constants as error
@@ -92,13 +92,13 @@ class Nsxv3IpamDriver(common.NsxAbstractIpamDriver):
             try:
                 self.nsxlib_ipam.release(nsx_pool_id, ip_addr)
             except Exception as e:
-                LOG.warning(_LW("Failed to release ip %(ip)s from pool "
-                                "%(pool)s: %(e)s"),
+                LOG.warning("Failed to release ip %(ip)s from pool "
+                            "%(pool)s: %(e)s",
                             {'ip': ip_addr, 'pool': nsx_pool_id, 'e': e})
         try:
             self.nsxlib_ipam.delete(nsx_pool_id)
         except Exception as e:
-            LOG.error(_LE("Failed to delete IPAM from backend: %s"), e)
+            LOG.error("Failed to delete IPAM from backend: %s", e)
             # Continue anyway, since this subnet was already removed
 
     def update_backend_pool(self, nsx_pool_id, subnet_request):
@@ -110,8 +110,8 @@ class Nsxv3IpamDriver(common.NsxAbstractIpamDriver):
             self.nsxlib_ipam.update(
                 nsx_pool_id, **update_args)
         except nsx_lib_exc.ManagerError as e:
-            LOG.error(_LE("NSX IPAM failed to update pool %(id)s: "
-                          " %(e)s; code %(code)s"),
+            LOG.error("NSX IPAM failed to update pool %(id)s: "
+                      " %(e)s; code %(code)s",
                       {'e': e,
                        'id': nsx_pool_id,
                        'code': e.error_code})
@@ -147,7 +147,7 @@ class Nsxv3IpamSubnet(common.NsxAbstractIpamSubnet):
             # If this is the subnet gateway IP - no need to allocate it
             subnet = self.get_details()
             if str(subnet.gateway_ip) == ip_address:
-                LOG.info(_LI("Skip allocation of gateway-ip for pool %s"),
+                LOG.info("Skip allocation of gateway-ip for pool %s",
                          self._nsx_pool_id)
                 return ip_address
         else:
@@ -157,9 +157,8 @@ class Nsxv3IpamSubnet(common.NsxAbstractIpamSubnet):
                 ip_addr=ip_address)
             ip_address = response['allocation_id']
         except nsx_lib_exc.ManagerError as e:
-            LOG.error(_LE("NSX IPAM failed to allocate ip %(ip)s of subnet "
-                          "%(id)s:"
-                          " %(e)s; code %(code)s"),
+            LOG.error("NSX IPAM failed to allocate ip %(ip)s of subnet "
+                      "%(id)s: %(e)s; code %(code)s",
                       {'e': e,
                        'ip': ip_address,
                        'id': self._subnet_id,
@@ -182,9 +181,8 @@ class Nsxv3IpamSubnet(common.NsxAbstractIpamSubnet):
                 # another backend error
                 raise ipam_exc.IPAllocationFailed()
             except Exception as e:
-                LOG.error(_LE("NSX IPAM failed to allocate ip %(ip)s of subnet "
-                              "%(id)s:"
-                              " %(e)s"),
+                LOG.error("NSX IPAM failed to allocate ip %(ip)s of subnet "
+                          "%(id)s: %(e)s",
                           {'e': e,
                            'ip': ip_address,
                            'id': self._subnet_id})
@@ -197,9 +195,8 @@ class Nsxv3IpamSubnet(common.NsxAbstractIpamSubnet):
             self.nsxlib_ipam.release(self._nsx_pool_id, ip_addr=address)
         except nsx_lib_exc.ManagerError as e:
             # fail silently
-            LOG.error(_LE("NSX IPAM failed to free ip %(ip)s of subnet "
-                          "%(id)s:"
-                          " %(e)s; code %(code)s"),
+            LOG.error("NSX IPAM failed to free ip %(ip)s of subnet "
+                      "%(id)s: %(e)s; code %(code)s",
                       {'e': e,
                        'ip': address,
                        'id': self._subnet_id,
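
Several of the ipam hunks above shrink by a line because adjacent Python string literals concatenate at compile time: once the _LE( wrapper is gone, the message can be re-folded without changing its value. A small demonstration:

    # Three adjacent literals...
    a = ("NSX IPAM failed to allocate ip %(ip)s of subnet "
         "%(id)s:"
         " %(e)s; code %(code)s")
    # ...fold into two with an identical result.
    b = ("NSX IPAM failed to allocate ip %(ip)s of subnet "
         "%(id)s: %(e)s; code %(code)s")
    assert a == b
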
@@ -22,7 +22,7 @@ from neutron_lib.plugins import directory
 from oslo_log import log as logging
 from oslo_utils import uuidutils
 
-from vmware_nsx._i18n import _, _LE
+from vmware_nsx._i18n import _
 from vmware_nsx.common import exceptions as nsx_exc
 from vmware_nsx.common import nsxv_constants
 from vmware_nsx.db import db as nsx_db
@@ -89,8 +89,8 @@ class NsxvL2GatewayDriver(l2gateway_db.L2GatewayMixin):
         try:
             edge_id = self._create_l2_gateway_edge(context)
         except nsx_exc.NsxL2GWDeviceNotFound:
-            LOG.exception(_LE("Failed to create backend device "
-                              "for L2 gateway"))
+            LOG.exception("Failed to create backend device "
+                          "for L2 gateway")
             raise
 
         devices[0]['device_name'] = edge_id
@@ -151,8 +151,8 @@ class NsxvL2GatewayDriver(l2gateway_db.L2GatewayMixin):
         try:
             self._nsxv.create_bridge(device_name, bridge_dict)
         except exceptions.VcnsApiException:
-            LOG.exception(_LE("Failed to update NSX, "
-                              "rolling back changes on neutron."))
+            LOG.exception("Failed to update NSX, "
+                          "rolling back changes on neutron.")
             raise l2gw_exc.L2GatewayServiceDriverError(
                 method='create_l2_gateway_connection_postcommit')
         return
@@ -33,7 +33,7 @@ from neutron_lib import context
 from neutron_lib import exceptions as n_exc
 from neutron_lib.plugins import directory

-from vmware_nsx._i18n import _, _LE, _LI
+from vmware_nsx._i18n import _
 from vmware_nsx.common import utils as nsx_utils
 from vmware_nsx.db import db as nsx_db
 from vmware_nsxlib.v3 import exceptions as nsxlib_exc
@@ -78,8 +78,8 @@ class NsxV3Driver(l2gateway_db.L2GatewayMixin):
         def_l2gw_name = cfg.CONF.nsx_v3.default_bridge_cluster
         # Return if no default_bridge_cluster set in config
         if not def_l2gw_name:
-            LOG.info(_LI("NSX: Default bridge cluster not configured "
-                         "in nsx.ini. No default L2 gateway created."))
+            LOG.info("NSX: Default bridge cluster not configured "
+                     "in nsx.ini. No default L2 gateway created.")
             return
         admin_ctx = context.get_admin_context()

@@ -104,7 +104,7 @@ class NsxV3Driver(l2gateway_db.L2GatewayMixin):
         # the first device in the list.
         if l2gateway['devices'][0]['device_name'] == def_l2gw_uuid:
             if def_l2gw_exists:
-                LOG.info(_LI("Default L2 gateway is already created."))
+                LOG.info("Default L2 gateway is already created.")
                 try:
                     # Try deleting this duplicate default L2 gateway
                     self.validate_l2_gateway_for_delete(
@@ -227,8 +227,8 @@ class NsxV3Driver(l2gateway_db.L2GatewayMixin):
                 seg_id=seg_id,
                 tags=tags)
         except nsxlib_exc.ManagerError as e:
-            LOG.exception(_LE("Unable to create bridge endpoint, rolling back "
-                              "changes on neutron. Exception is %s"), e)
+            LOG.exception("Unable to create bridge endpoint, rolling back "
+                          "changes on neutron. Exception is %s", e)
             raise l2gw_exc.L2GatewayServiceDriverError(
                 method='create_l2_gateway_connection_postcommit')
         #TODO(abhiraut): Consider specifying the name of the port
@@ -255,8 +255,8 @@ class NsxV3Driver(l2gateway_db.L2GatewayMixin):
             LOG.debug("IP addresses deallocated on port %s", port['id'])
         except (nsxlib_exc.ManagerError,
                 n_exc.NeutronException):
-            LOG.exception(_LE("Unable to create L2 gateway port, "
-                              "rolling back changes on neutron"))
+            LOG.exception("Unable to create L2 gateway port, "
+                          "rolling back changes on neutron")
             self._core_plugin.nsxlib.bridge_endpoint.delete(
                 bridge_endpoint['id'])
             raise l2gw_exc.L2GatewayServiceDriverError(
@@ -270,8 +270,8 @@ class NsxV3Driver(l2gateway_db.L2GatewayMixin):
                 port_id=port['id'])
         except db_exc.DBError:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("Unable to add L2 gateway connection "
-                                  "mappings, rolling back changes on neutron"))
+                LOG.exception("Unable to add L2 gateway connection "
+                              "mappings, rolling back changes on neutron")
                 self._core_plugin.nsxlib.bridge_endpoint.delete(
                     bridge_endpoint['id'])
         super(NsxV3Driver,
@@ -299,8 +299,8 @@ class NsxV3Driver(l2gateway_db.L2GatewayMixin):
         try:
             self._core_plugin.nsxlib.bridge_endpoint.delete(bridge_endpoint_id)
         except nsxlib_exc.ManagerError as e:
-            LOG.exception(_LE("Unable to delete bridge endpoint %(id)s on the "
-                              "backend due to exc: %(exc)s"),
+            LOG.exception("Unable to delete bridge endpoint %(id)s on the "
+                          "backend due to exc: %(exc)s",
                           {'id': bridge_endpoint_id, 'exc': e})
             raise l2gw_exc.L2GatewayServiceDriverError(
                 method='delete_l2_gateway_connection_postcommit')
@@ -17,7 +17,6 @@ from oslo_log import helpers as log_helpers
 from oslo_log import log as logging
 from oslo_utils import excutils

-from vmware_nsx._i18n import _LE
 from vmware_nsx.common import locking
 from vmware_nsx.db import nsxv_db
 from vmware_nsx.plugins.nsx_v.vshield.common import exceptions as nsxv_exc
@@ -86,8 +85,8 @@ class EdgeHealthMonitorManager(base_mgr.EdgeLoadbalancerBaseManager):
             with excutils.save_and_reraise_exception():
                 self.lbv2_driver.health_monitor.failed_completion(
                     context, hm)
-                LOG.error(_LE('Failed to create health monitor on edge: %s'
-                          ), edge_id)
+                LOG.error('Failed to create health monitor on edge: %s',
+                          edge_id)

         try:
             # Associate monitor with Edge pool
@@ -104,7 +103,7 @@ class EdgeHealthMonitorManager(base_mgr.EdgeLoadbalancerBaseManager):
             with excutils.save_and_reraise_exception():
                 self.lbv2_driver.health_monitor.failed_completion(context, hm)
                 LOG.error(
-                    _LE('Failed to create health monitor on edge: %s'),
+                    'Failed to create health monitor on edge: %s',
                     edge_id)

         self.lbv2_driver.health_monitor.successful_completion(context, hm)
@@ -132,8 +131,7 @@ class EdgeHealthMonitorManager(base_mgr.EdgeLoadbalancerBaseManager):
             with excutils.save_and_reraise_exception():
                 self.lbv2_driver.health_monitor.failed_completion(context,
                                                                   new_hm)
-                LOG.error(
-                    _LE('Failed to update monitor on edge: %s'), edge_id)
+                LOG.error('Failed to update monitor on edge: %s', edge_id)

         self.lbv2_driver.health_monitor.successful_completion(context, new_hm)

@@ -160,9 +158,8 @@ class EdgeHealthMonitorManager(base_mgr.EdgeLoadbalancerBaseManager):
         except nsxv_exc.VcnsApiException:
             with excutils.save_and_reraise_exception():
                 self.lbv2_driver.health_monitor.failed_completion(context, hm)
-                LOG.error(
-                    _LE('Failed to delete monitor mapping on edge: %s'),
-                    edge_id)
+                LOG.error('Failed to delete monitor mapping on edge: %s',
+                          edge_id)

         # If this monitor is not used on this edge anymore, delete it
         if not edge_pool['monitorId']:
@@ -174,8 +171,7 @@ class EdgeHealthMonitorManager(base_mgr.EdgeLoadbalancerBaseManager):
             with excutils.save_and_reraise_exception():
                 self.lbv2_driver.health_monitor.failed_completion(context,
                                                                   hm)
-                LOG.error(
-                    _LE('Failed to delete monitor on edge: %s'), edge_id)
+                LOG.error('Failed to delete monitor on edge: %s', edge_id)

         nsxv_db.del_nsxv_lbaas_monitor_binding(
             context.session, lb_id, hm.pool.id, hm.id, edge_id)
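Every LBaaS hunk in this commit sits inside the same error-handling idiom: mark the object failed, log, and let excutils.save_and_reraise_exception() re-raise the original exception with its traceback intact. A condensed sketch under stated assumptions (the failed_completion callback is simulated, not the real lbv2 driver API):

    import logging

    from oslo_utils import excutils

    LOG = logging.getLogger(__name__)

    def failed_completion(context, obj):
        # stand-in for lbv2_driver.<resource>.failed_completion()
        LOG.debug("marking %s as failed", obj)

    def create_health_monitor(context, hm, edge_id):
        try:
            raise RuntimeError("simulated VCNS API failure")
        except RuntimeError:
            # The with-block runs the cleanup code, then re-raises the
            # original exception so callers still see the backend error.
            with excutils.save_and_reraise_exception():
                failed_completion(context, hm)
                LOG.error('Failed to create health monitor on edge: %s',
                          edge_id)

Invoking create_health_monitor() logs the error, marks the object failed, and still propagates the RuntimeError to the caller.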
@@ -20,7 +20,7 @@ from oslo_utils import excutils
 from neutron_lib import constants
 from neutron_lib import exceptions as n_exc

-from vmware_nsx._i18n import _, _LE, _LW
+from vmware_nsx._i18n import _
 from vmware_nsx.common import locking
 from vmware_nsx.db import nsxv_db
 from vmware_nsx.services.lbaas.nsx_v import lbaas_common as lb_common
@@ -49,9 +49,9 @@ def policy_to_application_rule(policy):
         type_by_comp = type_by_compare_type.get(rule.compare_type)
         if type_by_comp is None:
             type_by_comp = ''
-            LOG.warnning(_LW('Unsupported compare type %(type)s is used in '
-                             'policy %(id)s'), {'type': rule.compare_type,
+            LOG.warnning('Unsupported compare type %(type)s is used in '
+                         'policy %(id)s', {'type': rule.compare_type,
                                            'id': policy.id})

         if rule.type == lb_const.L7_RULE_TYPE_COOKIE:
             # Example: acl <id> hdr_sub(cookie) SEEN=1
@@ -235,8 +235,8 @@ class EdgeL7PolicyManager(base_mgr.EdgeLoadbalancerBaseManager):
         except Exception as e:
             with excutils.save_and_reraise_exception():
                 self.lbv2_driver.l7policy.failed_completion(context, pol)
-                LOG.error(_LE('Failed to create L7policy on edge %(edge)s: '
-                              '%(err)s'),
+                LOG.error('Failed to create L7policy on edge %(edge)s: '
+                          '%(err)s',
                           {'edge': edge_id, 'err': e})
                 if app_rule_id:
                     # Failed to add the rule to the vip: delete the rule
@@ -273,8 +273,8 @@ class EdgeL7PolicyManager(base_mgr.EdgeLoadbalancerBaseManager):
         except Exception as e:
             with excutils.save_and_reraise_exception():
                 self.lbv2_driver.l7policy.failed_completion(context, new_pol)
-                LOG.error(_LE('Failed to update L7policy on edge %(edge)s: '
-                              '%(err)s'),
+                LOG.error('Failed to update L7policy on edge %(edge)s: '
+                          '%(err)s',
                           {'edge': edge_id, 'err': e})

         # complete the transaction
@@ -305,8 +305,8 @@ class EdgeL7PolicyManager(base_mgr.EdgeLoadbalancerBaseManager):
         except Exception as e:
             with excutils.save_and_reraise_exception():
                 self.lbv2_driver.l7policy.failed_completion(context, pol)
-                LOG.error(_LE('Failed to delete L7policy on edge '
-                              '%(edge)s: %(err)s'),
+                LOG.error('Failed to delete L7policy on edge '
+                          '%(edge)s: %(err)s',
                           {'edge': edge_id, 'err': e})

         # delete the nsxv db entry
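Note that the policy_to_application_rule hunk above only removes the _LW marker; the LOG.warnning(...) spelling appears on both sides of the diff and is left untouched, so it looks like a pre-existing typo (a correct call would be LOG.warning). The surviving pattern, named placeholders fed by a single dict, works like this (values are illustrative):

    import logging

    LOG = logging.getLogger(__name__)

    compare_type = 'STARTS_WITH'   # illustrative values
    policy_id = 'policy-1'

    # One mapping argument feeds every %(name)s placeholder; the string
    # is rendered only if WARNING records are actually emitted.
    LOG.warning('Unsupported compare type %(type)s is used in '
                'policy %(id)s', {'type': compare_type, 'id': policy_id})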
@@ -17,7 +17,6 @@ from oslo_log import helpers as log_helpers
 from oslo_log import log as logging
 from oslo_utils import excutils

-from vmware_nsx._i18n import _LE
 from vmware_nsx.common import locking
 from vmware_nsx.services.lbaas.nsx_v.v2 import base_mgr
 from vmware_nsx.services.lbaas.nsx_v.v2 import l7policy_mgr
@@ -46,8 +45,8 @@ class EdgeL7RuleManager(base_mgr.EdgeLoadbalancerBaseManager):
         except Exception as e:
             with excutils.save_and_reraise_exception():
                 self.lbv2_driver.l7rule.failed_completion(context, rule)
-                LOG.error(_LE('Failed to update L7rules on edge %(edge)s: '
-                              '%(err)s'),
+                LOG.error('Failed to update L7rules on edge %(edge)s: '
+                          '%(err)s',
                           {'edge': edge_id, 'err': e})

         # complete the transaction
@@ -17,7 +17,7 @@ from oslo_log import helpers as log_helpers
 from oslo_log import log as logging
 from oslo_utils import excutils

-from vmware_nsx._i18n import _, _LE
+from vmware_nsx._i18n import _
 from vmware_nsx.common import exceptions as nsxv_exc
 from vmware_nsx.common import locking
 from vmware_nsx.db import nsxv_db
@@ -167,7 +167,7 @@ class EdgeListenerManager(base_mgr.EdgeLoadbalancerBaseManager):
         except vcns_exc.VcnsApiException:
             with excutils.save_and_reraise_exception():
                 self.lbv2_driver.listener.failed_completion(context, listener)
-                LOG.error(_LE('Failed to create app profile on edge: %s'),
+                LOG.error('Failed to create app profile on edge: %s',
                           lb_binding['edge_id'])

         vse = listener_to_edge_vse(context, listener,
@@ -190,7 +190,7 @@ class EdgeListenerManager(base_mgr.EdgeLoadbalancerBaseManager):
         except vcns_exc.VcnsApiException:
             with excutils.save_and_reraise_exception():
                 self.lbv2_driver.listener.failed_completion(context, listener)
-                LOG.error(_LE('Failed to create vip on Edge: %s'), edge_id)
+                LOG.error('Failed to create vip on Edge: %s', edge_id)
                 self.vcns.delete_app_profile(edge_id, app_profile_id)

     @log_helpers.log_method_call
@@ -204,7 +204,7 @@ class EdgeListenerManager(base_mgr.EdgeLoadbalancerBaseManager):
         if pool_binding:
             default_pool = pool_binding['edge_pool_id']
         else:
-            LOG.error(_LE("Couldn't find pool binding for pool %s"),
+            LOG.error("Couldn't find pool binding for pool %s",
                       new_listener.default_pool.id)

         lb_id = new_listener.loadbalancer_id
@@ -255,7 +255,7 @@ class EdgeListenerManager(base_mgr.EdgeLoadbalancerBaseManager):
             with excutils.save_and_reraise_exception():
                 self.lbv2_driver.listener.failed_completion(context,
                                                             new_listener)
-                LOG.error(_LE('Failed to update app profile on edge: %s'),
+                LOG.error('Failed to update app profile on edge: %s',
                           edge_id)

     @log_helpers.log_method_call
@@ -276,25 +276,24 @@ class EdgeListenerManager(base_mgr.EdgeLoadbalancerBaseManager):
                 self.vcns.delete_vip(edge_id, edge_vse_id)

         except vcns_exc.ResourceNotFound:
-            LOG.error(_LE('vip not found on edge: %s'), edge_id)
+            LOG.error('vip not found on edge: %s', edge_id)
         except vcns_exc.VcnsApiException:
             with excutils.save_and_reraise_exception():
                 self.lbv2_driver.listener.failed_completion(context,
                                                             listener)
-                LOG.error(
-                    _LE('Failed to delete vip on edge: %s'), edge_id)
+                LOG.error('Failed to delete vip on edge: %s', edge_id)

         try:
             with locking.LockManager.get_lock(edge_id):
                 self.vcns.delete_app_profile(edge_id, app_profile_id)
         except vcns_exc.ResourceNotFound:
-            LOG.error(_LE('app profile not found on edge: %s'), edge_id)
+            LOG.error('app profile not found on edge: %s', edge_id)
         except vcns_exc.VcnsApiException:
             with excutils.save_and_reraise_exception():
                 self.lbv2_driver.listener.failed_completion(context,
                                                             listener)
                 LOG.error(
-                    _LE('Failed to delete app profile on Edge: %s'),
+                    'Failed to delete app profile on Edge: %s',
                     edge_id)

         nsxv_db.del_nsxv_lbaas_listener_binding(context.session, lb_id,
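The listener delete path above distinguishes a missing backend object (logged and skipped so cleanup can continue) from a real API failure (marked failed and re-raised). A minimal sketch under assumed exception and client names, not the real vcns API:

    import logging

    from oslo_utils import excutils

    LOG = logging.getLogger(__name__)

    class ResourceNotFound(Exception):
        pass

    class VcnsApiError(Exception):
        pass

    def delete_vip(vcns, edge_id, edge_vse_id):
        try:
            vcns.delete_vip(edge_id, edge_vse_id)
        except ResourceNotFound:
            # Already gone on the backend: record it and keep going.
            LOG.error('vip not found on edge: %s', edge_id)
        except VcnsApiError:
            # Genuine backend failure: log and propagate to the caller.
            with excutils.save_and_reraise_exception():
                LOG.error('Failed to delete vip on edge: %s', edge_id)

Any object exposing a delete_vip(edge_id, vse_id) method works as the vcns argument here; the point is the two-tier exception handling, not the client itself.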
@@ -22,7 +22,7 @@ from oslo_log import helpers as log_helpers
 from oslo_log import log as logging
 from oslo_utils import excutils

-from vmware_nsx._i18n import _, _LE
+from vmware_nsx._i18n import _
 from vmware_nsx.db import nsxv_db
 from vmware_nsx.plugins.nsx_v.vshield.common import exceptions as nsxv_exc
 from vmware_nsx.services.lbaas.nsx_v import lbaas_common as lb_common
@@ -69,7 +69,7 @@ class EdgeLoadBalancerManager(base_mgr.EdgeLoadbalancerBaseManager):
         except nsxv_exc.VcnsApiException:
             with excutils.save_and_reraise_exception():
                 self.lbv2_driver.load_balancer.failed_completion(context, lb)
-                LOG.error(_LE('Failed to create pool %s'), lb.id)
+                LOG.error('Failed to create pool %s', lb.id)

     @log_helpers.log_method_call
     def update(self, context, old_lb, new_lb):
@@ -107,16 +107,16 @@ class EdgeLoadBalancerManager(base_mgr.EdgeLoadbalancerBaseManager):
                     self.vcns, binding['edge_id'],
                     binding['edge_fw_rule_id'])
             except nsxv_exc.VcnsApiException as e:
-                LOG.error(_LE('Failed to delete loadbalancer %(lb)s '
-                              'FW rule. exception is %(exc)s'),
+                LOG.error('Failed to delete loadbalancer %(lb)s '
+                          'FW rule. exception is %(exc)s',
                           {'lb': lb.id, 'exc': e})
             try:
                 lb_common.del_vip_as_secondary_ip(self.vcns,
                                                   binding['edge_id'],
                                                   lb.vip_address)
             except Exception as e:
-                LOG.error(_LE('Failed to delete loadbalancer %(lb)s '
-                              'interface IP. exception is %(exc)s'),
+                LOG.error('Failed to delete loadbalancer %(lb)s '
+                          'interface IP. exception is %(exc)s',
                           {'lb': lb.id, 'exc': e})

         nsxv_db.del_nsxv_lbaas_loadbalancer_binding(context.session, lb.id)
@@ -17,7 +17,6 @@ from oslo_log import helpers as log_helpers
 from oslo_log import log as logging
 from oslo_utils import excutils

-from vmware_nsx._i18n import _LE
 from vmware_nsx.common import locking
 from vmware_nsx.db import nsxv_db
 from vmware_nsx.plugins.nsx_v.vshield.common import exceptions as nsxv_exc
@@ -81,7 +80,7 @@ class EdgeMemberManager(base_mgr.EdgeLoadbalancerBaseManager):
         except nsxv_exc.VcnsApiException:
             with excutils.save_and_reraise_exception():
                 self.lbv2_driver.member.failed_completion(context, member)
-                LOG.error(_LE('Failed to create member on edge: %s'),
+                LOG.error('Failed to create member on edge: %s',
                           edge_id)

     @log_helpers.log_method_call
@@ -124,13 +123,13 @@ class EdgeMemberManager(base_mgr.EdgeLoadbalancerBaseManager):
                 with excutils.save_and_reraise_exception():
                     self.lbv2_driver.member.failed_completion(
                         context, new_member)
-                    LOG.error(_LE('Failed to update member on edge: %s'),
+                    LOG.error('Failed to update member on edge: %s',
                               edge_id)
         else:
-            LOG.error(_LE('Pool %(pool_id)s on Edge %(edge_id)s has no '
-                          'members to update')
-                      % {'pool_id': new_member.pool.id,
+            LOG.error('Pool %(pool_id)s on Edge %(edge_id)s has no '
+                      'members to update',
+                      {'pool_id': new_member.pool.id,
                        'edge_id': edge_id})

     @log_helpers.log_method_call
     def delete(self, context, member):
@@ -173,5 +172,5 @@ class EdgeMemberManager(base_mgr.EdgeLoadbalancerBaseManager):
         except nsxv_exc.VcnsApiException:
             with excutils.save_and_reraise_exception():
                 self.lbv2_driver.member.failed_completion(context, member)
-                LOG.error(_LE('Failed to delete member on edge: %s'),
+                LOG.error('Failed to delete member on edge: %s',
                           edge_id)
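Besides dropping _LE, the -124,13 hunk above replaces an eager % with logging's lazy argument passing, and the two are not equivalent. A quick illustration:

    import logging

    LOG = logging.getLogger(__name__)
    values = {'pool_id': 'pool-1', 'edge_id': 'edge-1'}

    # Eager: the message is formatted before the logger sees it, even
    # when ERROR is disabled, and a missing key raises KeyError at the
    # call site.
    LOG.error('Pool %(pool_id)s on Edge %(edge_id)s has no members to update'
              % values)

    # Lazy: formatting happens only if the record is emitted, and a bad
    # placeholder surfaces as a handled logging error rather than an
    # exception in the caller.
    LOG.error('Pool %(pool_id)s on Edge %(edge_id)s has no members to update',
              values)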
@@ -19,7 +19,6 @@ from oslo_utils import excutils

 from neutron_lib import exceptions as n_exc

-from vmware_nsx._i18n import _LE
 from vmware_nsx.common import locking
 from vmware_nsx.db import nsxv_db
 from vmware_nsx.plugins.nsx_v.vshield.common import exceptions as nsxv_exc
@@ -83,7 +82,7 @@ class EdgePoolManager(base_mgr.EdgeLoadbalancerBaseManager):
         except nsxv_exc.VcnsApiException:
             with excutils.save_and_reraise_exception():
                 self.lbv2_driver.pool.failed_completion(context, pool)
-                LOG.error(_LE('Failed to create pool %s'), pool.id)
+                LOG.error('Failed to create pool %s', pool.id)

     @log_helpers.log_method_call
     def update(self, context, old_pool, new_pool):
@@ -119,7 +118,7 @@ class EdgePoolManager(base_mgr.EdgeLoadbalancerBaseManager):
         except nsxv_exc.VcnsApiException:
             with excutils.save_and_reraise_exception():
                 self.lbv2_driver.pool.failed_completion(context, new_pool)
-                LOG.error(_LE('Failed to update pool %s'), new_pool.id)
+                LOG.error('Failed to update pool %s', new_pool.id)

     @log_helpers.log_method_call
     def delete(self, context, pool):
@@ -154,4 +153,4 @@ class EdgePoolManager(base_mgr.EdgeLoadbalancerBaseManager):
                 context.session, lb_id, pool.id)
         except nsxv_exc.VcnsApiException:
             self.lbv2_driver.pool.failed_completion(context, pool)
-            LOG.error(_LE('Failed to delete pool %s'), pool.id)
+            LOG.error('Failed to delete pool %s', pool.id)
@@ -19,7 +19,7 @@ from neutron.services.qos import qos_plugin
 from oslo_config import cfg
 from oslo_log import log as logging

-from vmware_nsx._i18n import _, _LI
+from vmware_nsx._i18n import _
 from vmware_nsx.common import exceptions as nsx_exc

 LOG = logging.getLogger(__name__)
@@ -32,7 +32,7 @@ class NsxVQosPlugin(qos_plugin.QoSPlugin):
     supported_extension_aliases = ["qos"]

     def __init__(self):
-        LOG.info(_LI("Loading VMware NSX-V Qos Service Plugin"))
+        LOG.info("Loading VMware NSX-V Qos Service Plugin")
         super(NsxVQosPlugin, self).__init__()

         if not cfg.CONF.nsxv.use_dvs_features:
@@ -22,7 +22,7 @@ from neutron_lib.plugins import directory
 from oslo_config import cfg
 from oslo_log import log as logging

-from vmware_nsx._i18n import _, _LW
+from vmware_nsx._i18n import _
 from vmware_nsx.common import exceptions as nsx_exc
 from vmware_nsx.db import db as nsx_db

@@ -143,16 +143,16 @@ class QosNotificationsHandler(object):
         # Validate the max bandwidth value minimum value
         # (max value is above what neutron allows so no need to check it)
         if (bw_rule.max_kbps < MAX_KBPS_MIN_VALUE):
-            LOG.warning(_LW("Invalid input for max_kbps. "
-                            "The minimal legal value is %s"),
+            LOG.warning("Invalid input for max_kbps. "
+                        "The minimal legal value is %s",
                         MAX_KBPS_MIN_VALUE)
             bw_rule.max_kbps = MAX_KBPS_MIN_VALUE

         # validate the burst size value max value
         # (max value is 0, and neutron already validates this)
         if (bw_rule.max_burst_kbps > MAX_BURST_MAX_VALUE):
-            LOG.warning(_LW("Invalid input for burst_size. "
-                            "The maximal legal value is %s"),
+            LOG.warning("Invalid input for burst_size. "
+                        "The maximal legal value is %s",
                         MAX_BURST_MAX_VALUE)
             bw_rule.max_burst_kbps = MAX_BURST_MAX_VALUE

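The QoS notification handler above clamps out-of-range bandwidth values instead of rejecting the rule. A standalone sketch of that behavior, with assumed bounds (the real constants live elsewhere in the module):

    import logging

    LOG = logging.getLogger(__name__)

    MAX_KBPS_MIN_VALUE = 1024          # assumed value, for illustration
    MAX_BURST_MAX_VALUE = 1024 * 1024  # assumed value, for illustration

    def clamp_bw_rule(max_kbps, max_burst_kbps):
        """Clamp (max_kbps, max_burst_kbps) into the backend's legal range."""
        if max_kbps < MAX_KBPS_MIN_VALUE:
            LOG.warning("Invalid input for max_kbps. "
                        "The minimal legal value is %s", MAX_KBPS_MIN_VALUE)
            max_kbps = MAX_KBPS_MIN_VALUE
        if max_burst_kbps > MAX_BURST_MAX_VALUE:
            LOG.warning("Invalid input for burst_size. "
                        "The maximal legal value is %s", MAX_BURST_MAX_VALUE)
            max_burst_kbps = MAX_BURST_MAX_VALUE
        return max_kbps, max_burst_kbps

    print(clamp_bw_rule(10, 10 ** 9))  # -> (1024, 1048576)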
@@ -24,7 +24,6 @@ from neutron.services.trunk import constants as trunk_consts
 from neutron.services.trunk.drivers import base
 from neutron_lib.api.definitions import portbindings

-from vmware_nsx._i18n import _LE
 from vmware_nsx.common import nsx_constants as nsx_consts
 from vmware_nsx.common import utils as nsx_utils
 from vmware_nsx.db import db as nsx_db
@@ -69,8 +68,8 @@ class NsxV3TrunkHandler(object):
                 nsx_child_port_id)
         except nsxlib_exc.ResourceNotFound:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE("Child port %s not found on the backend. "
-                              "Setting trunk status to ERROR."),
+                LOG.error("Child port %s not found on the backend. "
+                          "Setting trunk status to ERROR.",
                           nsx_child_port_id)
         # Build address bindings and switch profiles otherwise backend will
         # clear that information during port update
@@ -103,9 +102,9 @@ class NsxV3TrunkHandler(object):
                 traffic_tag=seg_id)
         except nsxlib_exc.ManagerError as e:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE("Unable to update subport for attachment "
+                LOG.error("Unable to update subport for attachment "
                           "type. Setting trunk status to ERROR. "
-                          "Exception is %s"), e)
+                          "Exception is %s", e)

     def _set_subports(self, context, parent_port_id, subports):
         for subport in subports:
@@ -18,8 +18,6 @@ from oslo_serialization import jsonutils

 import prettytable

-from vmware_nsx._i18n import _LI
-
 LOG = logging.getLogger(__name__)


@@ -29,9 +27,9 @@ def output_formatter(resource_name, resources_list, attrs):
     Depending on the --fmt cli option we format the output as
     JSON or as a table.
     """
-    LOG.info(_LI('%(resource_name)s'), {'resource_name': resource_name})
+    LOG.info('%(resource_name)s', {'resource_name': resource_name})
     if not resources_list:
-        LOG.info(_LI('No resources found'))
+        LOG.info('No resources found')
         return ''

     fmt = cfg.CONF.fmt
@@ -15,11 +15,12 @@
 import six
 import sys

-from vmware_nsx._i18n import _LI, _
+from vmware_nsx._i18n import _
+from vmware_nsx.shell import resources as nsxadmin

 from neutron.callbacks import registry
 from neutron.common import profiler  # noqa
 from oslo_log import log as logging
-from vmware_nsx.shell import resources as nsxadmin

 LOG = logging.getLogger(__name__)

@@ -34,7 +35,7 @@ def output_header(func):
     def func_desc(*args, **kwargs):
         component = '[%s]' % func.__name__.split('_')[0].upper()
         op_desc = [n.capitalize() for n in func.__name__.split('_')[1:]]
-        LOG.info(_LI('==== %(component)s %(operation)s ===='),
+        LOG.info('==== %(component)s %(operation)s ====',
                  {'component': component, 'operation': ' '.join(op_desc)})
         return func(*args, **kwargs)
     func_desc.__name__ = func.__name__
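output_header (updated above) derives a log banner from the decorated function's name. A self-contained version of the same idea, using functools.wraps instead of the manual __name__ copy shown in the diff, and with the closing return that the hunk truncates:

    import functools
    import logging

    logging.basicConfig(level=logging.INFO)
    LOG = logging.getLogger(__name__)

    def output_header(func):
        """Log '==== [COMPONENT] Operation ====' based on the function name."""
        @functools.wraps(func)
        def func_desc(*args, **kwargs):
            component = '[%s]' % func.__name__.split('_')[0].upper()
            op_desc = [n.capitalize() for n in func.__name__.split('_')[1:]]
            LOG.info('==== %(component)s %(operation)s ====',
                     {'component': component, 'operation': ' '.join(op_desc)})
            return func(*args, **kwargs)
        return func_desc

    @output_header
    def nsx_list_backup_edges():
        pass

    nsx_list_backup_edges()  # logs: ==== [NSX] List Backup Edges ====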
@@ -19,7 +19,6 @@ from neutron_lib import exceptions
 from oslo_log import log as logging
 from oslo_utils import uuidutils

-from vmware_nsx._i18n import _LE, _LI
 from vmware_nsx.common import locking
 from vmware_nsx.common import nsxv_constants
 from vmware_nsx.db import nsxv_db
@@ -90,7 +89,7 @@ def _delete_edge_from_nsx_and_neutron(edge_id, router_id):
         _delete_backup_from_neutron_db(edge_id, router_id)
         return True
     except Exception as expt:
-        LOG.error(_LE("%s"), str(expt))
+        LOG.error("%s", str(expt))
         return False


@@ -99,7 +98,7 @@ def _nsx_delete_backup_edge(edge_id, all_backup_edges):
     try:
         edge_result = nsxv.get_edge(edge_id)
     except exceptions.NeutronException as x:
-        LOG.error(_LE("%s"), str(x))
+        LOG.error("%s", str(x))
     else:
         # edge_result[0] is response status code
         # edge_result[1] is response body
@@ -108,7 +107,7 @@ def _nsx_delete_backup_edge(edge_id, all_backup_edges):
         if (not edge['name'].startswith('backup-')
             or edge['id'] not in backup_edges):
             LOG.error(
-                _LE('Edge: %s is not a backup edge; aborting delete'),
+                'Edge: %s is not a backup edge; aborting delete',
                 edge_id)
         else:
             return _delete_edge_from_nsx_and_neutron(edge_id, edge['name'])
@@ -119,18 +118,18 @@ def nsx_clean_backup_edge(resource, event, trigger, **kwargs):
     errmsg = ("Need to specify edge-id property. Add --property "
               "edge-id=<edge-id>")
     if not kwargs.get('property'):
-        LOG.error(_LE("%s"), errmsg)
+        LOG.error("%s", errmsg)
         return
     properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
     edge_id = properties.get('edge-id')
     if not edge_id:
-        LOG.error(_LE("%s"), errmsg)
+        LOG.error("%s", errmsg)
         return
     #ask for the user confirmation
     confirm = admin_utils.query_yes_no(
         "Do you want to delete edge: %s" % edge_id, default="no")
     if not confirm:
-        LOG.info(_LI("Backup edge deletion aborted by user"))
+        LOG.info("Backup edge deletion aborted by user")
         return
     # delete the backup edge
     _nsx_delete_backup_edge(edge_id, get_nsxv_backup_edges())
@@ -145,7 +144,7 @@ def nsx_clean_all_backup_edges(resource, event, trigger, **kwargs):
         "Do you want to delete %s backup edges?" % len(backup_edges),
         default="no")
     if not confirm:
-        LOG.info(_LI("Backup edges deletion aborted by user"))
+        LOG.info("Backup edges deletion aborted by user")
         return

     deleted_cnt = 0
@@ -154,7 +153,7 @@ def nsx_clean_all_backup_edges(resource, event, trigger, **kwargs):
         if _nsx_delete_backup_edge(edge['id'], backup_edges):
             deleted_cnt = deleted_cnt + 1

-    LOG.info(_LI('Done Deleting %s backup edges'), deleted_cnt)
+    LOG.info('Done Deleting %s backup edges', deleted_cnt)


 @admin_utils.output_header
@@ -167,12 +166,12 @@ def neutron_clean_backup_edge(resource, event, trigger, **kwargs):
     errmsg = ("Need to specify router-id property. Add --property "
               "router-id=<router-id>")
     if not kwargs.get('property'):
-        LOG.error(_LE("%s"), errmsg)
+        LOG.error("%s", errmsg)
         return
     properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
     router_id = properties.get('router-id')
     if not router_id:
-        LOG.error(_LE("%s"), errmsg)
+        LOG.error("%s", errmsg)
         return

     # look for the router-binding entry
@@ -180,7 +179,7 @@ def neutron_clean_backup_edge(resource, event, trigger, **kwargs):
     rtr_binding = nsxv_db.get_nsxv_router_binding(
         edgeapi.context.session, router_id)
     if not rtr_binding:
-        LOG.error(_LE('Backup %s was not found in DB'), router_id)
+        LOG.error('Backup %s was not found in DB', router_id)
         return

     edge_id = rtr_binding['edge_id']
@@ -240,20 +239,20 @@ def nsx_fix_name_mismatch(resource, event, trigger, **kwargs):
     errmsg = ("Need to specify edge-id property. Add --property "
               "edge-id=<edge-id>")
     if not kwargs.get('property'):
-        LOG.error(_LE("%s"), errmsg)
+        LOG.error("%s", errmsg)
         return
     properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
     edgeapi = utils.NeutronDbClient()
     edge_id = properties.get('edge-id')
     if not edge_id:
-        LOG.error(_LE("%s"), errmsg)
+        LOG.error("%s", errmsg)
         return
     try:
         # edge[0] is response status code
         # edge[1] is response body
         edge = nsxv.get_edge(edge_id)[1]
     except exceptions.NeutronException as e:
-        LOG.error(_LE("%s"), str(e))
+        LOG.error("%s", str(e))
     else:
         if edge['name'].startswith('backup-'):

@@ -261,8 +260,7 @@ def nsx_fix_name_mismatch(resource, event, trigger, **kwargs):
                 edgeapi.context.session, edge['id'])

             if rtr_binding['router_id'] == edge['name']:
-                LOG.error(
-                    _LE('Edge %s no mismatch with NSX'), edge_id)
+                LOG.error('Edge %s no mismatch with NSX', edge_id)
                 return

             try:
@@ -292,13 +290,13 @@ def nsx_fix_name_mismatch(resource, event, trigger, **kwargs):
                         '-' + rtr_db['id'])
                 else:
                     LOG.error(
-                        _LE('No database entry for router id %s'),
+                        'No database entry for router id %s',
                         rtr_binding['router_id'])

             else:
                 LOG.error(
-                    _LE('Could not determine the name for '
-                        'Edge %s'), edge_id)
+                    'Could not determine the name for '
+                    'Edge %s', edge_id)
                 return

             confirm = admin_utils.query_yes_no(
@@ -307,21 +305,21 @@ def nsx_fix_name_mismatch(resource, event, trigger, **kwargs):
                 default="no")

             if not confirm:
-                LOG.info(_LI("Edge rename aborted by user"))
+                LOG.info("Edge rename aborted by user")
                 return
-            LOG.info(_LI("Edge rename started"))
+            LOG.info("Edge rename started")
             # remove some keys that will fail the NSX transaction
             edge_utils.remove_irrelevant_keys_from_edge_request(edge)
             try:
-                LOG.error(_LE("Update edge..."))
+                LOG.error("Update edge...")
                 nsxv.update_edge(edge_id, edge)
             except Exception as e:
-                LOG.error(_LE("Update failed - %s"), (e))
+                LOG.error("Update failed - %s", (e))
     except Exception as e:
-        LOG.error(_LE("%s"), str(e))
+        LOG.error("%s", str(e))
     else:
         LOG.error(
-            _LE('Edge %s has no backup prefix on NSX'), edge_id)
+            'Edge %s has no backup prefix on NSX', edge_id)
         return

 registry.subscribe(nsx_list_backup_edges,
@@ -16,7 +16,6 @@ from neutron.callbacks import registry
 from oslo_config import cfg
 from oslo_log import log as logging

-from vmware_nsx._i18n import _LE, _LI
 from vmware_nsx.plugins.nsx_v.vshield.common import exceptions
 from vmware_nsx.shell.admin.plugins.common import constants
 from vmware_nsx.shell.admin.plugins.common import utils as admin_utils
@@ -32,12 +31,12 @@ def validate_configuration(resource, event, trigger, **kwargs):
     try:
         utils.NsxVPluginWrapper()
     except exceptions.Forbidden:
-        LOG.error(_LE("Configuration validation failed: wrong VSM credentials "
-                      "for %s"), cfg.CONF.nsxv.manager_uri)
+        LOG.error("Configuration validation failed: wrong VSM credentials "
+                  "for %s", cfg.CONF.nsxv.manager_uri)
     except Exception as e:
-        LOG.error(_LE("Configuration validation failed: %s"), e)
+        LOG.error("Configuration validation failed: %s", e)
     else:
-        LOG.info(_LI("Configuration validation succeeded"))
+        LOG.info("Configuration validation succeeded")


 registry.subscribe(validate_configuration,
@ -27,7 +27,6 @@ import vmware_nsx.shell.resources as shell
|
|||||||
from neutron.callbacks import registry
|
from neutron.callbacks import registry
|
||||||
from neutron.db import l3_db
|
from neutron.db import l3_db
|
||||||
|
|
||||||
from vmware_nsx._i18n import _LE, _LI, _LW
|
|
||||||
from vmware_nsx.common import locking
|
from vmware_nsx.common import locking
|
||||||
from vmware_nsx.db import nsxv_db
|
from vmware_nsx.db import nsxv_db
|
||||||
from vmware_nsx.plugins.nsx_v.vshield.common import (
|
from vmware_nsx.plugins.nsx_v.vshield.common import (
|
||||||
@ -48,7 +47,7 @@ def nsx_get_static_bindings_by_edge(edge_id):
|
|||||||
try:
|
try:
|
||||||
nsx_dhcp_bindings = nsxv.query_dhcp_configuration(edge_id)
|
nsx_dhcp_bindings = nsxv.query_dhcp_configuration(edge_id)
|
||||||
except exceptions.ResourceNotFound:
|
except exceptions.ResourceNotFound:
|
||||||
LOG.error(_LE("Edge %s was not found"), edge_id)
|
LOG.error("Edge %s was not found", edge_id)
|
||||||
return
|
return
|
||||||
|
|
||||||
# nsx_dhcp_bindings[0] contains response headers;
|
# nsx_dhcp_bindings[0] contains response headers;
|
||||||
@ -83,39 +82,39 @@ def list_missing_dhcp_bindings(resource, event, trigger, **kwargs):
|
|||||||
"""
|
"""
|
||||||
for (edge_id, count) in nsxv_db.get_nsxv_dhcp_bindings_count_per_edge(
|
for (edge_id, count) in nsxv_db.get_nsxv_dhcp_bindings_count_per_edge(
|
||||||
neutron_db.context.session):
|
neutron_db.context.session):
|
||||||
LOG.info(_LI("%s"), "=" * 60)
|
LOG.info("%s", "=" * 60)
|
||||||
LOG.info(_LI("For edge: %s"), edge_id)
|
LOG.info("For edge: %s", edge_id)
|
||||||
nsx_dhcp_static_bindings = nsx_get_static_bindings_by_edge(edge_id)
|
nsx_dhcp_static_bindings = nsx_get_static_bindings_by_edge(edge_id)
|
||||||
if nsx_dhcp_static_bindings is None:
|
if nsx_dhcp_static_bindings is None:
|
||||||
continue
|
continue
|
||||||
neutron_dhcp_static_bindings = \
|
neutron_dhcp_static_bindings = \
|
||||||
neutron_get_static_bindings_by_edge(edge_id)
|
neutron_get_static_bindings_by_edge(edge_id)
|
||||||
LOG.info(_LI("# of DHCP bindings in Neutron DB: %s"),
|
LOG.info("# of DHCP bindings in Neutron DB: %s",
|
||||||
len(neutron_dhcp_static_bindings))
|
len(neutron_dhcp_static_bindings))
|
||||||
LOG.info(_LI("# of DHCP bindings on NSXv backend: %s"),
|
LOG.info("# of DHCP bindings on NSXv backend: %s",
|
||||||
len(nsx_dhcp_static_bindings))
|
len(nsx_dhcp_static_bindings))
|
||||||
missing = neutron_dhcp_static_bindings - nsx_dhcp_static_bindings
|
missing = neutron_dhcp_static_bindings - nsx_dhcp_static_bindings
|
||||||
if not missing:
|
if not missing:
|
||||||
LOG.info(_LI("No missing DHCP bindings found."))
|
LOG.info("No missing DHCP bindings found.")
|
||||||
LOG.info(_LI("Neutron DB and NSXv backend are in sync"))
|
LOG.info("Neutron DB and NSXv backend are in sync")
|
||||||
else:
|
else:
|
||||||
LOG.info(_LI("Missing DHCP bindings:"))
|
LOG.info("Missing DHCP bindings:")
|
||||||
LOG.info(_LI("%s"), pprint.pformat(missing))
|
LOG.info("%s", pprint.pformat(missing))
|
||||||
|
|
||||||
|
|
||||||
@admin_utils.output_header
|
@admin_utils.output_header
|
||||||
def nsx_update_dhcp_edge_binding(resource, event, trigger, **kwargs):
|
def nsx_update_dhcp_edge_binding(resource, event, trigger, **kwargs):
|
||||||
"""Resync DHCP bindings on NSXv Edge"""
|
"""Resync DHCP bindings on NSXv Edge"""
|
||||||
if not kwargs.get('property'):
|
if not kwargs.get('property'):
|
||||||
LOG.error(_LE("Need to specify edge-id parameter"))
|
LOG.error("Need to specify edge-id parameter")
|
||||||
return
|
return
|
||||||
else:
|
else:
|
||||||
properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
|
properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
|
||||||
edge_id = properties.get('edge-id')
|
edge_id = properties.get('edge-id')
|
||||||
if not edge_id:
|
if not edge_id:
|
||||||
LOG.error(_LE("Need to specify edge-id parameter"))
|
LOG.error("Need to specify edge-id parameter")
|
||||||
return
|
return
|
||||||
LOG.info(_LI("Updating NSXv Edge: %s"), edge_id)
|
LOG.info("Updating NSXv Edge: %s", edge_id)
|
||||||
# Need to create a plugin object; so that we are able to
|
# Need to create a plugin object; so that we are able to
|
||||||
# do neutron list-ports.
|
# do neutron list-ports.
|
||||||
with utils.NsxVPluginWrapper() as plugin:
|
with utils.NsxVPluginWrapper() as plugin:
|
||||||
@ -126,11 +125,11 @@ def nsx_update_dhcp_edge_binding(resource, event, trigger, **kwargs):
|
|||||||
edge_manager.update_dhcp_service_config(
|
edge_manager.update_dhcp_service_config(
|
||||||
neutron_db.context, edge_id)
|
neutron_db.context, edge_id)
|
||||||
except exceptions.ResourceNotFound:
|
except exceptions.ResourceNotFound:
|
||||||
LOG.error(_LE("Edge %s not found"), edge_id)
|
LOG.error("Edge %s not found", edge_id)
|
||||||
|
|
||||||
|
|
||||||
def delete_old_dhcp_edge(context, old_edge_id, bindings):
|
def delete_old_dhcp_edge(context, old_edge_id, bindings):
|
||||||
LOG.info(_LI("Deleting the old DHCP edge: %s"), old_edge_id)
|
LOG.info("Deleting the old DHCP edge: %s", old_edge_id)
|
||||||
# using one of the router-ids in the bindings for the deleting
|
# using one of the router-ids in the bindings for the deleting
|
||||||
dhcp_names = [binding['router_id'] for binding in bindings]
|
dhcp_names = [binding['router_id'] for binding in bindings]
|
||||||
dhcp_name = dhcp_names[0]
|
dhcp_name = dhcp_names[0]
|
||||||
@ -142,7 +141,7 @@ def delete_old_dhcp_edge(context, old_edge_id, bindings):
|
|||||||
try:
|
try:
|
||||||
nsxv.delete_edge(old_edge_id)
|
nsxv.delete_edge(old_edge_id)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.warning(_LW("Failed to delete the old edge %(id)s: %(e)s"),
|
LOG.warning("Failed to delete the old edge %(id)s: %(e)s",
|
||||||
{'id': old_edge_id, 'e': e})
|
{'id': old_edge_id, 'e': e})
|
||||||
# Continue the process anyway
|
# Continue the process anyway
|
||||||
# The edge may have been already deleted at the backend
|
# The edge may have been already deleted at the backend
|
||||||
@ -152,8 +151,8 @@ def delete_old_dhcp_edge(context, old_edge_id, bindings):
|
|||||||
nsxv_db.delete_nsxv_router_binding(context.session, dhcp_name)
|
nsxv_db.delete_nsxv_router_binding(context.session, dhcp_name)
|
||||||
nsxv_db.clean_edge_vnic_binding(context.session, old_edge_id)
|
nsxv_db.clean_edge_vnic_binding(context.session, old_edge_id)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.warning(_LW("Failed to delete the old edge %(id)s from the "
|
LOG.warning("Failed to delete the old edge %(id)s from the "
|
||||||
"DB : %(e)s"), {'id': old_edge_id, 'e': e})
|
"DB : %(e)s", {'id': old_edge_id, 'e': e})
|
||||||
|
|
||||||
|
|
||||||
def recreate_vdr_dhcp_edge(context, plugin, edge_manager,
|
def recreate_vdr_dhcp_edge(context, plugin, edge_manager,
|
||||||
@ -184,18 +183,18 @@ def recreate_vdr_dhcp_edge(context, plugin, edge_manager,
|
|||||||
new_binding = nsxv_db.get_vdr_dhcp_binding_by_vdr(
|
new_binding = nsxv_db.get_vdr_dhcp_binding_by_vdr(
|
||||||
context.session, vdr_router_id)
|
context.session, vdr_router_id)
|
||||||
if new_binding:
|
if new_binding:
|
||||||
LOG.info(_LI("VDR router %(vdr_id)s was moved to edge %(edge_id)s"),
|
LOG.info("VDR router %(vdr_id)s was moved to edge %(edge_id)s",
|
||||||
{'vdr_id': vdr_router_id,
|
{'vdr_id': vdr_router_id,
|
||||||
'edge_id': new_binding['dhcp_edge_id']})
|
'edge_id': new_binding['dhcp_edge_id']})
|
||||||
else:
|
else:
|
||||||
LOG.error(_LE("VDR router %(vdr_id)s was not moved to a new edge"),
|
LOG.error("VDR router %(vdr_id)s was not moved to a new edge",
|
||||||
{'vdr_id': vdr_router_id})
|
{'vdr_id': vdr_router_id})
|
||||||
|
|
||||||
|
|
||||||
def recreate_network_dhcp(context, plugin, edge_manager, old_edge_id, net_id):
|
def recreate_network_dhcp(context, plugin, edge_manager, old_edge_id, net_id):
|
||||||
"""Handle the DHCP edge recreation of a network
|
"""Handle the DHCP edge recreation of a network
|
||||||
"""
|
"""
|
||||||
LOG.info(_LI("Moving network %s to a new edge"), net_id)
|
LOG.info("Moving network %s to a new edge", net_id)
|
||||||
# delete the old binding
|
# delete the old binding
|
||||||
resource_id = (nsxv_constants.DHCP_EDGE_PREFIX + net_id)[:36]
|
resource_id = (nsxv_constants.DHCP_EDGE_PREFIX + net_id)[:36]
|
||||||
nsxv_db.delete_nsxv_router_binding(context.session, resource_id)
|
nsxv_db.delete_nsxv_router_binding(context.session, resource_id)
|
||||||
@ -214,7 +213,7 @@ def recreate_network_dhcp(context, plugin, edge_manager, old_edge_id, net_id):
|
|||||||
net_filters = {'network_id': [net_id], 'enable_dhcp': [True]}
|
net_filters = {'network_id': [net_id], 'enable_dhcp': [True]}
|
||||||
subnets = plugin.get_subnets(context, filters=net_filters)
|
subnets = plugin.get_subnets(context, filters=net_filters)
|
||||||
for subnet in subnets:
|
for subnet in subnets:
|
||||||
LOG.info(_LI("Moving subnet %s to a new edge"), subnet['id'])
|
LOG.info("Moving subnet %s to a new edge", subnet['id'])
|
||||||
# allocate / reuse the new dhcp edge
|
# allocate / reuse the new dhcp edge
|
||||||
new_resource_id = edge_manager.create_dhcp_edge_service(
|
new_resource_id = edge_manager.create_dhcp_edge_service(
|
||||||
context, net_id, subnet)
|
context, net_id, subnet)
|
||||||
@ -223,7 +222,7 @@ def recreate_network_dhcp(context, plugin, edge_manager, old_edge_id, net_id):
|
|||||||
plugin._update_dhcp_service_new_edge(context, resource_id)
|
plugin._update_dhcp_service_new_edge(context, resource_id)
|
||||||
|
|
||||||
     # Update the ip of the dhcp port
-    LOG.info(_LI("Creating network %s DHCP address group"), net_id)
+    LOG.info("Creating network %s DHCP address group", net_id)
     address_groups = plugin._create_network_dhcp_address_group(
         context, net_id)
     plugin._update_dhcp_edge_service(context, net_id, address_groups)
@@ -232,17 +231,17 @@ def recreate_network_dhcp(context, plugin, edge_manager, old_edge_id, net_id):
     new_binding = nsxv_db.get_nsxv_router_binding(
         context.session, resource_id)
     if new_binding:
-        LOG.info(_LI("Network %(net_id)s was moved to edge %(edge_id)s"),
+        LOG.info("Network %(net_id)s was moved to edge %(edge_id)s",
                  {'net_id': net_id, 'edge_id': new_binding['edge_id']})
     else:
-        LOG.error(_LE("Network %(net_id)s was not moved to a new edge"),
+        LOG.error("Network %(net_id)s was not moved to a new edge",
                   {'net_id': net_id})


 @admin_utils.output_header
 def nsx_recreate_dhcp_edge(resource, event, trigger, **kwargs):
     """Recreate a dhcp edge with all the networks n a new NSXv edge"""
-    usage_msg = _LE("Need to specify edge-id or net-id parameter")
+    usage_msg = ("Need to specify edge-id or net-id parameter")
     if not kwargs.get('property'):
         LOG.error(usage_msg)
         return
@@ -258,7 +257,7 @@ def nsx_recreate_dhcp_edge(resource, event, trigger, **kwargs):
             return
         LOG.error(usage_msg)
         return
-    LOG.info(_LI("ReCreating NSXv Edge: %s"), old_edge_id)
+    LOG.info("ReCreating NSXv Edge: %s", old_edge_id)

     context = n_context.get_admin_context()

@@ -268,7 +267,7 @@ def nsx_recreate_dhcp_edge(resource, event, trigger, **kwargs):
     if (not bindings or
             not bindings[0]['router_id'].startswith(
                 nsxv_constants.DHCP_EDGE_PREFIX)):
-        LOG.error(_LE("Edge %(edge_id)s is not a DHCP edge"),
+        LOG.error("Edge %(edge_id)s is not a DHCP edge",
                   {'edge_id': old_edge_id})
         return

@@ -322,7 +321,7 @@ def _get_net_vdr_router_id(plugin, context, net_id):

 def nsx_recreate_dhcp_edge_by_net_id(net_id):
     """Recreate a dhcp edge for a specific network without an edge"""
-    LOG.info(_LI("ReCreating NSXv Edge for network: %s"), net_id)
+    LOG.info("ReCreating NSXv Edge for network: %s", net_id)

     context = n_context.get_admin_context()

@@ -333,8 +332,8 @@ def nsx_recreate_dhcp_edge_by_net_id(net_id):
     if router_binding:
         # make sure there is no edge
         if router_binding['edge_id']:
-            LOG.warning(_LW("Network %(net_id)s already has a dhcp edge: "
-                            "%(egde_id)s"),
+            LOG.warning("Network %(net_id)s already has a dhcp edge: "
+                        "%(egde_id)s",
                         {'edge_id': router_binding['edge_id'],
                          'net_id': net_id})
             return
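The pattern in every hunk above is the same: the `_LI`/`_LE`/`_LW` wrapper around the format string is dropped, while the lazy interpolation arguments stay outside the string. A minimal standalone sketch of the surviving style, with illustrative values (logger setup assumed, not part of the patch):

    import logging

    logging.basicConfig(level=logging.INFO)
    LOG = logging.getLogger(__name__)

    # The logger interpolates the mapping only when the record is actually
    # emitted, so the format string stays constant and nothing is built eagerly.
    LOG.info("Network %(net_id)s was moved to edge %(edge_id)s",
             {'net_id': 'net-1', 'edge_id': 'edge-2'})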
@@ -29,7 +29,6 @@ from neutron.callbacks import registry
 from neutron_lib import exceptions
 from oslo_log import log as logging

-from vmware_nsx._i18n import _LE, _LI
 from vmware_nsx.common import nsxv_constants
 from vmware_nsx.db import nsxv_db
 from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az
@@ -98,12 +97,12 @@ def nsx_list_orphaned_edges(resource, event, trigger, **kwargs):
     Orphaned edges are NSXv edges that exist on NSXv backend but
     don't have a corresponding binding in Neutron DB
     """
-    LOG.info(_LI("NSXv edges present on NSXv backend but not present "
-                 "in Neutron DB\n"))
+    LOG.info("NSXv edges present on NSXv backend but not present "
+             "in Neutron DB\n")
     orphaned_edges = get_orphaned_edges()
     if not orphaned_edges:
-        LOG.info(_LI("\nNo orphaned edges found."
-                     "\nNeutron DB and NSXv backend are in sync\n"))
+        LOG.info("\nNo orphaned edges found."
+                 "\nNeutron DB and NSXv backend are in sync\n")
     else:
         LOG.info(constants.ORPHANED_EDGES)
         data = [('edge_id',)]
@@ -116,7 +115,7 @@ def nsx_list_orphaned_edges(resource, event, trigger, **kwargs):
 def nsx_delete_orphaned_edges(resource, event, trigger, **kwargs):
     """Delete orphaned edges from NSXv backend"""
     orphaned_edges = get_orphaned_edges()
-    LOG.info(_LI("Before delete; Orphaned Edges: %s"), orphaned_edges)
+    LOG.info("Before delete; Orphaned Edges: %s", orphaned_edges)

     if not kwargs.get('force'):
         if len(orphaned_edges):
@@ -124,15 +123,15 @@ def nsx_delete_orphaned_edges(resource, event, trigger, **kwargs):
                                                    "orphaned edges",
                                                    default="no")
             if not user_confirm:
-                LOG.info(_LI("NSXv Edge deletion aborted by user"))
+                LOG.info("NSXv Edge deletion aborted by user")
                 return

     nsxv = utils.get_nsxv_client()
     for edge in orphaned_edges:
-        LOG.info(_LI("Deleting edge: %s"), edge)
+        LOG.info("Deleting edge: %s", edge)
         nsxv.delete_edge(edge)

-    LOG.info(_LI("After delete; Orphaned Edges: \n%s"),
+    LOG.info("After delete; Orphaned Edges: \n%s",
              pprint.pformat(get_orphaned_edges()))


@@ -161,12 +160,12 @@ def nsx_list_missing_edges(resource, event, trigger, **kwargs):
     Missing edges are NSXv edges that have a binding in Neutron DB
     but are currently missing from the NSXv backend.
     """
-    LOG.info(_LI("NSXv edges present in Neutron DB but not present "
-                 "on the NSXv backend\n"))
+    LOG.info("NSXv edges present in Neutron DB but not present "
+             "on the NSXv backend\n")
     missing_edges = get_missing_edges()
     if not missing_edges:
-        LOG.info(_LI("\nNo edges are missing."
-                     "\nNeutron DB and NSXv backend are in sync\n"))
+        LOG.info("\nNo edges are missing."
+                 "\nNeutron DB and NSXv backend are in sync\n")
     else:
         data = [('edge_id', 'network_id')]
         for edge in missing_edges:
@@ -188,9 +187,9 @@ def change_edge_ha(ha, edge_id):
     try:
         nsxv.enable_ha(edge_id, request)
     except nsxv_exceptions.ResourceNotFound as e:
-        LOG.error(_LE("Edge %s not found"), edge_id)
+        LOG.error("Edge %s not found", edge_id)
     except exceptions.NeutronException as e:
-        LOG.error(_LE("%s"), str(e))
+        LOG.error("%s", str(e))


 def change_edge_syslog(properties):
@@ -200,7 +199,7 @@ def change_edge_syslog(properties):

     request['protocol'] = properties.get('syslog-proto', 'tcp')
     if request['protocol'] not in ['tcp', 'udp']:
-        LOG.error(_LE("Property value error: syslog-proto must be tcp/udp"))
+        LOG.error("Property value error: syslog-proto must be tcp/udp")
         return

     if properties.get('syslog-server'):
@@ -214,18 +213,18 @@ def change_edge_syslog(properties):
     try:
         nsxv.update_edge_syslog(edge_id, request)
     except nsxv_exceptions.ResourceNotFound as e:
-        LOG.error(_LE("Edge %s not found"), edge_id)
+        LOG.error("Edge %s not found", edge_id)
     except exceptions.NeutronException as e:
-        LOG.error(_LE("%s"), str(e))
+        LOG.error("%s", str(e))


 def delete_edge_syslog(edge_id):
     try:
         nsxv.delete_edge_syslog(edge_id)
     except nsxv_exceptions.ResourceNotFound as e:
-        LOG.error(_LE("Edge %s not found"), edge_id)
+        LOG.error("Edge %s not found", edge_id)
     except exceptions.NeutronException as e:
-        LOG.error(_LE("%s"), str(e))
+        LOG.error("%s", str(e))


 def change_edge_loglevel(properties):
@@ -257,17 +256,17 @@ def change_edge_loglevel(properties):

     for module, level in modules.items():
         if level == 'none':
-            LOG.info(_LI("Disabling logging for %s"), module)
+            LOG.info("Disabling logging for %s", module)
         else:
-            LOG.info(_LI("Enabling logging for %(m)s with level %(l)s"),
+            LOG.info("Enabling logging for %(m)s with level %(l)s",
                      {'m': module, 'l': level})
         try:
             edge_utils.update_edge_loglevel(nsxv, edge_id, module, level)

         except nsxv_exceptions.ResourceNotFound as e:
-            LOG.error(_LE("Edge %s not found"), edge_id)
+            LOG.error("Edge %s not found", edge_id)
         except exceptions.NeutronException as e:
-            LOG.error(_LE("%s"), str(e))
+            LOG.error("%s", str(e))

     # take ownership for properties
     return True
@@ -276,16 +275,16 @@ def change_edge_loglevel(properties):
 def change_edge_appliance_size(properties):
     size = properties.get('size')
     if size not in vcns_const.ALLOWED_EDGE_SIZES:
-        LOG.error(_LE("Edge appliance size not in %(size)s"),
+        LOG.error("Edge appliance size not in %(size)s",
                   {'size': vcns_const.ALLOWED_EDGE_SIZES})
         return
     try:
         nsxv.change_edge_appliance_size(
             properties.get('edge-id'), size)
     except nsxv_exceptions.ResourceNotFound as e:
-        LOG.error(_LE("Edge %s not found"), properties.get('edge-id'))
+        LOG.error("Edge %s not found", properties.get('edge-id'))
     except exceptions.NeutronException as e:
-        LOG.error(_LE("%s"), str(e))
+        LOG.error("%s", str(e))


 def _get_edge_az_and_size(edge_id):
@@ -322,9 +321,9 @@ def change_edge_appliance(edge_id):
     try:
         nsxv.change_edge_appliance(edge_id, request)
     except nsxv_exceptions.ResourceNotFound as e:
-        LOG.error(_LE("Edge %s not found"), edge_id)
+        LOG.error("Edge %s not found", edge_id)
     except exceptions.NeutronException as e:
-        LOG.error(_LE("%s"), str(e))
+        LOG.error("%s", str(e))
     else:
         # also update the edge_ha of the edge
         change_edge_ha(az.edge_ha, edge_id)
@@ -341,20 +340,20 @@ def change_edge_appliance_reservations(properties):
     res['shares'] = properties.get('shares')
     resource = properties.get('resource')
     if not res:
-        LOG.error(_LE("Please configure reservations"))
+        LOG.error("Please configure reservations")
         return
     if resource == 'cpu':
         reservations['cpuReservation'] = res
     elif resource == 'memory':
         reservations['memoryReservation'] = res
     else:
-        LOG.error(_LE("Please configure resource"))
+        LOG.error("Please configure resource")
         return
     edge_id = properties.get('edge-id')
     try:
         h, edge = nsxv.get_edge(edge_id)
     except exceptions.NeutronException as e:
-        LOG.error(_LE("%s"), str(e))
+        LOG.error("%s", str(e))
         return
     appliances = edge['appliances']['appliances']
     for appliance in appliances:
@@ -363,9 +362,9 @@ def change_edge_appliance_reservations(properties):
     try:
         nsxv.change_edge_appliance(edge_id, request)
     except nsxv_exceptions.ResourceNotFound as e:
-        LOG.error(_LE("Edge %s not found"), edge_id)
+        LOG.error("Edge %s not found", edge_id)
     except exceptions.NeutronException as e:
-        LOG.error(_LE("%s"), str(e))
+        LOG.error("%s", str(e))


 def _update_host_group_for_edge(nsxv, dvs_mng, edge_id, edge):
@@ -378,11 +377,11 @@ def _update_host_group_for_edge(nsxv, dvs_mng, edge_id, edge):
                 dvs_mng, az,
                 validate=True)
         except Exception as e:
-            LOG.error(_LE("Failed to update edge %(id)s - %(e)s"),
+            LOG.error("Failed to update edge %(id)s - %(e)s",
                       {'id': edge['id'],
                        'e': e})
     else:
-        LOG.error(_LE("%s is not a gateway services"), edge_id)
+        LOG.error("%s is not a gateway services", edge_id)


 def change_edge_hostgroup(properties):
@@ -392,7 +391,7 @@ def change_edge_hostgroup(properties):
     try:
         edge_result = nsxv.get_edge(edge_id)
     except exceptions.NeutronException as x:
-        LOG.error(_LE("%s"), str(x))
+        LOG.error("%s", str(x))
     else:
         # edge_result[0] is response status code
         # edge_result[1] is response body
@@ -411,32 +410,32 @@ def change_edge_hostgroup(properties):
         try:
             edge_utils.clean_host_groups(dvs_mng, az)
         except Exception:
-            LOG.error(_LE("Failed to clean AZ %s"), az.name)
+            LOG.error("Failed to clean AZ %s", az.name)
     else:
-        LOG.error(_LE('Currently not supported'))
+        LOG.error('Currently not supported')


 @admin_utils.output_header
 def nsx_update_edge(resource, event, trigger, **kwargs):
     """Update edge properties"""
-    usage_msg = _LE("Need to specify edge-id parameter and "
+    usage_msg = ("Need to specify edge-id parameter and "
                 "attribute to update. Add --property edge-id=<edge-id> "
                 "and --property highavailability=<True/False> or "
                 "--property size=<size> or --property appliances=True. "
                 "\nFor syslog, add --property syslog-server=<ip>|none and "
                 "(optional) --property syslog-server2=<ip> and/or "
                 "(optional) --property syslog-proto=[tcp/udp] "
                 "\nFor log levels, add --property [routing|dhcp|dns|"
                 "highavailability|loadbalancer]-log-level="
                 "[debug|info|warning|error]. To set log level for all "
                 "modules, add --property log-level=<level> "
                 "\nFor edge reservations, add "
                 "--property resource=cpu|memory and "
                 "(optional) --property limit=<limit> and/or "
                 "(optional) --property shares=<shares> and/or "
                 "(optional) --property reservation=<reservation> "
                 "\nFor hostgroup updates, add "
                 "--property hostgroup=update/all/clean")
     if not kwargs.get('property'):
         LOG.error(usage_msg)
         return
@@ -444,10 +443,10 @@ def nsx_update_edge(resource, event, trigger, **kwargs):
     if (not properties.get('edge-id') and
             not properties.get('hostgroup', '').lower() == "all" and
             not properties.get('hostgroup', '').lower() == "clean"):
-        LOG.error(_LE("Need to specify edge-id. "
-                      "Add --property edge-id=<edge-id>"))
+        LOG.error("Need to specify edge-id. "
+                  "Add --property edge-id=<edge-id>")
         return
-    LOG.info(_LI("Updating NSXv edge: %(edge)s with properties\n%(prop)s"),
+    LOG.info("Updating NSXv edge: %(edge)s with properties\n%(prop)s",
              {'edge': properties.get('edge-id'), 'prop': properties})
     if properties.get('highavailability'):
         change_edge_ha(properties['highavailability'].lower() == "true",
@@ -477,8 +476,8 @@ def nsx_update_edge(resource, event, trigger, **kwargs):
 def nsx_update_edges(resource, event, trigger, **kwargs):
     """Update all edges with the given property"""
     if not kwargs.get('property'):
-        usage_msg = _LE("Need to specify a property to update all edges. "
+        usage_msg = ("Need to specify a property to update all edges. "
                     "Add --property appliances=<True/False>")
         LOG.error(usage_msg)
         return

@@ -491,14 +490,13 @@ def nsx_update_edges(resource, event, trigger, **kwargs):
             change_edge_appliance(edge.get('edge-id'))
         except Exception as e:
             result += 1
-            LOG.error(_LE("Failed to update edge %(edge)s. Exception: "
-                          "%(e)s"), {'edge': edge.get('edge-id'),
+            LOG.error("Failed to update edge %(edge)s. Exception: "
+                      "%(e)s", {'edge': edge.get('edge-id'),
                                 'e': str(e)})
     if result > 0:
         total = len(edges)
-        msg = (_LE("%(result)s of %(total)s edges failed "
-               "to update.") % {'result': result, 'total': total})
-        LOG.error(msg)
+        LOG.error("%(result)s of %(total)s edges failed "
+                  "to update.", {'result': result, 'total': total})


 registry.subscribe(nsx_list_edges,
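The last hunk of this file also replaces an eagerly %-formatted message with arguments handed to the logger. A hedged sketch of the difference, standalone and with assumed sample values:

    import logging

    logging.basicConfig(level=logging.ERROR)
    LOG = logging.getLogger(__name__)
    result, total = 2, 5

    # before: the message string was always built, even if no handler used it
    msg = ("%(result)s of %(total)s edges failed "
           "to update." % {'result': result, 'total': total})
    LOG.error(msg)

    # after: interpolation is deferred to the logging machinery
    LOG.error("%(result)s of %(total)s edges failed "
              "to update.", {'result': result, 'total': total})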
@@ -21,7 +21,6 @@ from neutron.db import models_v2
 from oslo_config import cfg
 from oslo_log import log as logging

-from vmware_nsx._i18n import _LE, _LI
 from vmware_nsx.common import config
 from vmware_nsx.common import locking
 from vmware_nsx.common import nsxv_constants
@@ -52,12 +51,12 @@ def nsx_redo_metadata_cfg(resource, event, trigger, **kwargs):
         if az.supports_metadata():
             nsx_redo_metadata_cfg_for_az(az, edgeapi)
         else:
-            LOG.info(_LI("Skipping availability zone: %s - no metadata "
-                         "configuration"), az.name)
+            LOG.info("Skipping availability zone: %s - no metadata "
+                     "configuration", az.name)


 def nsx_redo_metadata_cfg_for_az(az, edgeapi):
-    LOG.info(_LI("Updating MetaData for availability zone: %s"), az.name)
+    LOG.info("Updating MetaData for availability zone: %s", az.name)

     # Get the list of internal networks for this AZ
     db_net = nsxv_db.get_nsxv_internal_network(
@@ -95,9 +94,9 @@ def nsx_redo_metadata_cfg_for_az(az, edgeapi):
         edge_internal_ips.append(edge_internal_ip['ip_address'])

     if not internal_net or not internal_subnet or not edge_internal_ips:
-        LOG.error(_LE("Metadata infrastructure is missing or broken. "
+        LOG.error("Metadata infrastructure is missing or broken. "
                   "It is recommended to restart neutron service before "
-                      "proceeding with configuration restoration"))
+                  "proceeding with configuration restoration")
         return

     router_bindings = nsxv_db.get_nsxv_router_bindings(
@@ -210,10 +209,10 @@ def get_metadata_status(resource, event, trigger, **kwargs):
             edgeapi.context.session, net_id)
         providers = [asp['edge_id'] for asp in as_provider_data]
         if providers:
-            LOG.info(_LI('Metadata providers for network %s'), net_id)
+            LOG.info('Metadata providers for network %s', net_id)
             _md_member_status('Edge %s', providers)
         else:
-            LOG.info(_LI('No providers found for network %s'), net_id)
+            LOG.info('No providers found for network %s', net_id)


 registry.subscribe(nsx_redo_metadata_cfg,
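Many of these log messages span several source lines; they rely on Python's implicit concatenation of adjacent string literals, which is why only the wrapper and the closing parenthesis change while the middle lines are often untouched. A quick standalone illustration:

    # adjacent literals are joined at compile time into one format string
    message = ("Metadata infrastructure is missing or broken. "
               "It is recommended to restart neutron service before "
               "proceeding with configuration restoration")
    assert "broken. It is recommended" in message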
@@ -18,7 +18,6 @@ from oslo_serialization import jsonutils
 import re
 import xml.etree.ElementTree as et

-from vmware_nsx._i18n import _LE, _LI
 from vmware_nsx.db import db as nsx_db
 from vmware_nsx.plugins.nsx_v.vshield.common import exceptions
 from vmware_nsx.shell.admin.plugins.common import constants
@@ -76,20 +75,20 @@ def neutron_list_networks(resource, event, trigger,
 def nsx_update_switch(resource, event, trigger, **kwargs):
     nsxv = utils.get_nsxv_client()
     if not kwargs.get('property'):
-        LOG.error(_LE("Need to specify dvs-id parameter and "
+        LOG.error("Need to specify dvs-id parameter and "
                   "attribute to update. Add --property dvs-id=<dvs-id> "
-                      "--property teamingpolicy=<policy>"))
+                  "--property teamingpolicy=<policy>")
         return
     properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
     dvs_id = properties.get('dvs-id')
     if not dvs_id:
-        LOG.error(_LE("Need to specify dvs-id. "
-                      "Add --property dvs-id=<dvs-id>"))
+        LOG.error("Need to specify dvs-id. "
+                  "Add --property dvs-id=<dvs-id>")
         return
     try:
         h, switch = nsxv.get_vdn_switch(dvs_id)
     except exceptions.ResourceNotFound:
-        LOG.error(_LE("DVS %s not found"), dvs_id)
+        LOG.error("DVS %s not found", dvs_id)
         return
     supported_policies = ['ETHER_CHANNEL', 'LOADBALANCE_LOADBASED',
                           'LOADBALANCE_SRCID', 'LOADBALANCE_SRCMAC',
@@ -98,10 +97,10 @@ def nsx_update_switch(resource, event, trigger, **kwargs):
     policy = properties.get('teamingpolicy')
     if policy in supported_policies:
         if switch['teamingPolicy'] == policy:
-            LOG.info(_LI("Policy already set!"))
+            LOG.info("Policy already set!")
             return
-        LOG.info(_LI("Updating NSXv switch %(dvs)s teaming policy to "
-                     "%(policy)s"), {'dvs': dvs_id, 'policy': policy})
+        LOG.info("Updating NSXv switch %(dvs)s teaming policy to "
+                 "%(policy)s", {'dvs': dvs_id, 'policy': policy})
         switch['teamingPolicy'] = policy
         try:
             switch = nsxv.update_vdn_switch(switch)
@@ -109,17 +108,17 @@ def nsx_update_switch(resource, event, trigger, **kwargs):
             desc = jsonutils.loads(e.response)
             details = desc.get('details')
             if details.startswith("No enum constant"):
-                LOG.error(_LE("Unknown teaming policy %s"), policy)
+                LOG.error("Unknown teaming policy %s", policy)
             else:
-                LOG.error(_LE("Unexpected error occurred: %s"), details)
+                LOG.error("Unexpected error occurred: %s", details)
             return

-        LOG.info(_LI("Switch value after update: %s"), switch)
+        LOG.info("Switch value after update: %s", switch)
     else:
-        LOG.info(_LI("Current switch value is: %s"), switch)
-        LOG.error(_LE("Invalid teaming policy. "
-                      "Add --property teamingpolicy=<policy>"))
-        LOG.error(_LE("Possible values: %s"), ', '.join(supported_policies))
+        LOG.info("Current switch value is: %s", switch)
+        LOG.error("Invalid teaming policy. "
+                  "Add --property teamingpolicy=<policy>")
+        LOG.error("Possible values: %s", ', '.join(supported_policies))


 @admin_utils.output_header
@@ -202,17 +201,17 @@ def delete_backend_network(resource, event, trigger, **kwargs):
     """
     errmsg = ("Need to specify moref property. Add --property moref=<moref>")
     if not kwargs.get('property'):
-        LOG.error(_LE("%s"), errmsg)
+        LOG.error("%s", errmsg)
         return
     properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
     moref = properties.get('moref')
     if not moref:
-        LOG.error(_LE("%s"), errmsg)
+        LOG.error("%s", errmsg)
         return

     backend_name = get_networks_name_map().get(moref)
     if not backend_name:
-        LOG.error(_LE("Failed to find the backend network %(moref)s"),
+        LOG.error("Failed to find the backend network %(moref)s",
                   {'moref': moref})
         return

@@ -224,26 +223,26 @@ def delete_backend_network(resource, event, trigger, **kwargs):
         # get the dvs id from the backend name:
         dvs_id = get_dvs_id_from_backend_name(backend_name)
         if not dvs_id:
-            LOG.error(_LE("Failed to find the DVS id of backend network "
-                          "%(moref)s"), {'moref': moref})
+            LOG.error("Failed to find the DVS id of backend network "
+                      "%(moref)s", {'moref': moref})
         else:
             try:
                 nsxv.delete_port_group(dvs_id, moref)
             except Exception as e:
-                LOG.error(_LE("Failed to delete backend network %(moref)s : "
-                              "%(e)s"), {'moref': moref, 'e': e})
+                LOG.error("Failed to delete backend network %(moref)s : "
+                          "%(e)s", {'moref': moref, 'e': e})
             else:
-                LOG.info(_LI("Backend network %(moref)s was deleted"),
+                LOG.info("Backend network %(moref)s was deleted",
                          {'moref': moref})
     else:
         # Virtual wire
         try:
             nsxv.delete_virtual_wire(moref)
         except Exception as e:
-            LOG.error(_LE("Failed to delete backend network %(moref)s : "
-                          "%(e)s"), {'moref': moref, 'e': e})
+            LOG.error("Failed to delete backend network %(moref)s : "
+                      "%(e)s", {'moref': moref, 'e': e})
         else:
-            LOG.info(_LI("Backend network %(moref)s was deleted"),
+            LOG.info("Backend network %(moref)s was deleted",
                      {'moref': moref})

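Note the `LOG.error("%s", errmsg)` form kept above: the variable text travels as an argument while the format string itself stays a constant. A sketch of why that is a useful habit, with a hypothetical message value; it keeps the format string stable for log aggregation and avoids accidental re-interpolation if arguments are added to the call later:

    import logging

    logging.basicConfig(level=logging.ERROR)
    LOG = logging.getLogger(__name__)
    errmsg = "moref must match 100% (e.g. dvportgroup-50)"

    # safe: errmsg is never treated as a format string itself
    LOG.error("%s", errmsg)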
@@ -23,7 +23,6 @@ from neutron_lib import context as n_context
 from oslo_config import cfg
 from oslo_log import log as logging

-from vmware_nsx._i18n import _LE, _LI, _LW
 from vmware_nsx.common import locking
 from vmware_nsx.db import nsxv_db
 from vmware_nsx.extensions import routersize
@@ -35,7 +34,7 @@ LOG = logging.getLogger(__name__)


 def delete_old_edge(context, old_edge_id):
-    LOG.info(_LI("Deleting the old edge: %s"), old_edge_id)
+    LOG.info("Deleting the old edge: %s", old_edge_id)

     # clean it up from the DB
     nsxv_db.clean_edge_router_binding(context.session, old_edge_id)
@@ -51,7 +50,7 @@ def delete_old_edge(context, old_edge_id):
         nsxv = utils.get_nsxv_client()
         nsxv.delete_edge(old_edge_id)
     except Exception as e:
-        LOG.warning(_LW("Failed to delete the old edge %(id)s: %(e)s"),
+        LOG.warning("Failed to delete the old edge %(id)s: %(e)s",
                     {'id': old_edge_id, 'e': e})
         # Continue the process anyway
         # The edge may have been already deleted at the backend
@@ -74,16 +73,16 @@ def _get_router_az_from_plugin_router(router):
 def nsx_recreate_router_edge(resource, event, trigger, **kwargs):
     """Recreate a router edge with all the data on a new NSXv edge"""
     if not kwargs.get('property'):
-        LOG.error(_LE("Need to specify edge-id parameter"))
+        LOG.error("Need to specify edge-id parameter")
         return

     # input validation
     properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
     old_edge_id = properties.get('edge-id')
     if not old_edge_id:
-        LOG.error(_LE("Need to specify edge-id parameter"))
+        LOG.error("Need to specify edge-id parameter")
         return
-    LOG.info(_LI("ReCreating NSXv Router Edge: %s"), old_edge_id)
+    LOG.info("ReCreating NSXv Router Edge: %s", old_edge_id)

     # init the plugin and edge manager
     cfg.CONF.set_override('core_plugin',
@@ -98,7 +97,7 @@ def nsx_recreate_router_edge(resource, event, trigger, **kwargs):
     # verify that this is a Router edge
     router_ids = edge_manager.get_routers_on_edge(context, old_edge_id)
     if not router_ids:
-        LOG.error(_LE("Edge %(edge_id)s is not a router edge"),
+        LOG.error("Edge %(edge_id)s is not a router edge",
                   {'edge_id': old_edge_id})
         return

@@ -108,8 +107,8 @@ def nsx_recreate_router_edge(resource, event, trigger, **kwargs):
     router_driver = plugin._router_managers.get_tenant_router_driver(
         context, example_router['router_type'])
     if router_driver.get_type() == "distributed":
-        LOG.error(_LE("Recreating a distributed driver edge is not "
-                      "supported"))
+        LOG.error("Recreating a distributed driver edge is not "
+                  "supported")
         return

     # load all the routers before deleting their binding
@@ -137,7 +136,7 @@ def nsx_recreate_router_edge(resource, event, trigger, **kwargs):
         # find out who is the new edge to print it
         new_edge_id = router_driver._get_edge_id_or_raise(
             context, router_id)
-        LOG.info(_LI("Router %(router)s was attached to edge %(edge)s"),
+        LOG.info("Router %(router)s was attached to edge %(edge)s",
                  {'router': router_id, 'edge': new_edge_id})

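A change this mechanical is usually scripted rather than typed by hand. A naive sketch of the rewrite for the simple single-line cases only; it deliberately ignores multi-line calls like most of the ones above, which needed manual reflowing of the continuation strings:

    import re

    # handles only _LX("...") wrapping a single literal on one line
    PATTERN = re.compile(r'_L[IWEC]\((".*?")\)')

    line = 'LOG.info(_LI("ReCreating NSXv Router Edge: %s"), old_edge_id)'
    print(PATTERN.sub(r'\1', line))
    # -> LOG.info("ReCreating NSXv Router Edge: %s", old_edge_id)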
@@ -23,7 +23,6 @@ from neutron.extensions import securitygroup as ext_sg
 from neutron_lib import context
 from oslo_log import log as logging

-from vmware_nsx._i18n import _LE, _LI, _LW
 from vmware_nsx.db import db as nsx_db
 from vmware_nsx.db import nsx_models
 from vmware_nsx.db import nsxv_db
@@ -137,7 +136,7 @@ class NsxFirewallAPI(object):
         # read all the sections
         h, firewall_config = self.vcns.get_dfw_config()
         if not firewall_config:
-            LOG.info(_LI("No firewall sections were found."))
+            LOG.info("No firewall sections were found.")
             return

         root = et.fromstring(firewall_config)
@@ -158,7 +157,7 @@ class NsxFirewallAPI(object):
                 child.remove(sec)

         if not policy_sections:
-            LOG.info(_LI("No need to reorder the firewall sections."))
+            LOG.info("No need to reorder the firewall sections.")
             return

         # reorder the sections to have the policy sections first
@@ -167,7 +166,7 @@ class NsxFirewallAPI(object):

         # update the new order of sections in the backend
         self.vcns.update_dfw_config(et.tostring(root), h)
-        LOG.info(_LI("L3 Firewall sections were reordered."))
+        LOG.info("L3 Firewall sections were reordered.")


 neutron_sg = NeutronSecurityGroupDB()
@@ -304,19 +303,19 @@ def fix_security_groups(resource, event, trigger, **kwargs):
 def migrate_sg_to_policy(resource, event, trigger, **kwargs):
     """Change the mode of a security group from rules to NSX policy"""
     if not kwargs.get('property'):
-        LOG.error(_LE("Need to specify security-group-id and policy-id "
-                      "parameters"))
+        LOG.error("Need to specify security-group-id and policy-id "
+                  "parameters")
         return

     # input validation
     properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
     sg_id = properties.get('security-group-id')
     if not sg_id:
-        LOG.error(_LE("Need to specify security-group-id parameter"))
+        LOG.error("Need to specify security-group-id parameter")
         return
     policy_id = properties.get('policy-id')
     if not policy_id:
-        LOG.error(_LE("Need to specify policy-id parameter"))
+        LOG.error("Need to specify policy-id parameter")
         return

     # validate that the security group exist and contains rules and no policy
@@ -325,45 +324,45 @@ def migrate_sg_to_policy(resource, event, trigger, **kwargs):
     try:
         secgroup = plugin.get_security_group(context_, sg_id)
     except ext_sg.SecurityGroupNotFound:
-        LOG.error(_LE("Security group %s was not found"), sg_id)
+        LOG.error("Security group %s was not found", sg_id)
         return
     if secgroup.get('policy'):
-        LOG.error(_LE("Security group %s already uses a policy"), sg_id)
+        LOG.error("Security group %s already uses a policy", sg_id)
         return

     # validate that the policy exists
     if not plugin.nsx_v.vcns.validate_inventory(policy_id):
-        LOG.error(_LE("NSX policy %s was not found"), policy_id)
+        LOG.error("NSX policy %s was not found", policy_id)
         return

     # Delete the rules from the security group
-    LOG.info(_LI("Deleting the rules of security group: %s"), sg_id)
+    LOG.info("Deleting the rules of security group: %s", sg_id)
     for rule in secgroup.get('security_group_rules', []):
         try:
             plugin.delete_security_group_rule(context_, rule['id'])
         except Exception as e:
-            LOG.warning(_LW("Failed to delete rule %(r)s from security "
-                            "group %(sg)s: %(e)s"),
+            LOG.warning("Failed to delete rule %(r)s from security "
+                        "group %(sg)s: %(e)s",
                         {'r': rule['id'], 'sg': sg_id, 'e': e})
             # continue anyway

     # Delete the security group FW section
-    LOG.info(_LI("Deleting the section of security group: %s"), sg_id)
+    LOG.info("Deleting the section of security group: %s", sg_id)
     try:
         section_uri = plugin._get_section_uri(context_.session, sg_id)
         plugin._delete_section(section_uri)
         nsxv_db.delete_neutron_nsx_section_mapping(
             context_.session, sg_id)
     except Exception as e:
-        LOG.warning(_LW("Failed to delete firewall section of security "
-                        "group %(sg)s: %(e)s"),
+        LOG.warning("Failed to delete firewall section of security "
+                    "group %(sg)s: %(e)s",
                     {'sg': sg_id, 'e': e})
         # continue anyway

     # bind this security group to the policy in the backend and DB
     nsx_sg_id = nsx_db.get_nsx_security_group_id(context_.session, sg_id)
-    LOG.info(_LI("Binding the NSX security group %(nsx)s to policy "
-                 "%(pol)s"),
+    LOG.info("Binding the NSX security group %(nsx)s to policy "
+             "%(pol)s",
              {'nsx': nsx_sg_id, 'pol': policy_id})
     plugin._update_nsx_security_group_policies(
         policy_id, None, nsx_sg_id)
@@ -371,7 +370,7 @@ def migrate_sg_to_policy(resource, event, trigger, **kwargs):
     with context_.session.begin(subtransactions=True):
         prop.update({sg_policy.POLICY: policy_id})

-    LOG.info(_LI("Done."))
+    LOG.info("Done.")


 registry.subscribe(migrate_sg_to_policy,
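Only the log-marker imports are removed by this patch; user-facing API error text elsewhere in the tree still goes through the `_` translation function, which is why `_` remains a declared builtin for the linter. A hedged sketch of the resulting split (the exception class here is illustrative, not from this patch):

    import logging

    from neutron_lib import exceptions
    from vmware_nsx._i18n import _  # still used for API-visible messages

    LOG = logging.getLogger(__name__)

    class PolicyNotFound(exceptions.NotFound):
        # translated: this text can reach the API user
        message = _("NSX policy %(policy_id)s was not found")

    # not translated: this text only reaches the operator's log
    LOG.error("NSX policy %s was not found", 'policy-7')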
@@ -23,7 +23,6 @@ import vmware_nsx.shell.resources as shell
 from neutron.callbacks import registry
 from neutron_lib import exceptions

-from vmware_nsx._i18n import _LE, _LI
 from vmware_nsx.db import nsxv_db

 from oslo_log import log as logging
@@ -85,15 +84,15 @@ def nsx_list_missing_spoofguard_policies(resource, event, trigger,
     props = kwargs.get('property')
     reverse = True if props and props[0] == 'reverse' else False
     if reverse:
-        LOG.info(_LI("Spoofguard policies on NSXv but not present in "
-                     "Neutron Db"))
+        LOG.info("Spoofguard policies on NSXv but not present in "
+                 "Neutron Db")
     else:
-        LOG.info(_LI("Spoofguard policies in Neutron Db but not present "
-                     "on NSXv"))
+        LOG.info("Spoofguard policies in Neutron Db but not present "
+                 "on NSXv")
     missing_policies = get_missing_spoofguard_policy_mappings(reverse)
     if not missing_policies:
-        LOG.info(_LI("\nNo missing spoofguard policies found."
-                     "\nNeutron DB and NSXv backend are in sync\n"))
+        LOG.info("\nNo missing spoofguard policies found."
+                 "\nNeutron DB and NSXv backend are in sync\n")
     else:
         LOG.info(missing_policies)
         missing_policies = [{'policy_id': pid} for pid in missing_policies]
@@ -106,33 +105,33 @@ def nsx_clean_spoofguard_policy(resource, event, trigger, **kwargs):
     errmsg = ("Need to specify policy-id. Add --property "
               "policy-id=<policy-id>")
     if not kwargs.get('property'):
-        LOG.error(_LE("%s"), errmsg)
+        LOG.error("%s", errmsg)
         return
     properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
     policy_id = properties.get('policy-id')
     if not policy_id:
-        LOG.error(_LE("%s"), errmsg)
+        LOG.error("%s", errmsg)
         return
     try:
         h, c = nsxv.get_spoofguard_policy(policy_id)
     except exceptions.NeutronException as e:
-        LOG.error(_LE("Unable to retrieve policy %(p)s: %(e)s"),
+        LOG.error("Unable to retrieve policy %(p)s: %(e)s",
                   {'p': policy_id, 'e': str(e)})
     else:
         if not c['spoofguardList']:
-            LOG.error(_LE("Policy %s does not exist"), policy_id)
+            LOG.error("Policy %s does not exist", policy_id)
             return
         confirm = admin_utils.query_yes_no(
             "Do you want to delete spoofguard-policy: %s" % policy_id,
             default="no")
         if not confirm:
-            LOG.info(_LI("spoofguard-policy deletion aborted by user"))
+            LOG.info("spoofguard-policy deletion aborted by user")
             return
         try:
             nsxv.delete_spoofguard_policy(policy_id)
         except Exception as e:
-            LOG.error(_LE("%s"), str(e))
+            LOG.error("%s", str(e))
-        LOG.info(_LI('spoofguard-policy successfully deleted.'))
+        LOG.info('spoofguard-policy successfully deleted.')


 registry.subscribe(neutron_list_spoofguard_policy_mappings,
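Unwrapping hundreds of `_LX(...)` calls means deleting one closing parenthesis per call, which is easy to get wrong by hand. A quick sanity check one might run over the touched files (the path shown is illustrative):

    import ast

    # raises SyntaxError if an unbalanced parenthesis slipped into the rewrite
    path = ('vmware_nsx/shell/admin/plugins/nsxv/resources/'
            'spoofguard_policy.py')
    with open(path) as f:
        ast.parse(f.read(), filename=path)
    print("syntax OK")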
@@ -21,7 +21,6 @@ from neutron.db import common_db_mixin as common_db
 from neutron_lib import context as neutron_context
 from neutron_lib.plugins import directory

-from vmware_nsx._i18n import _LW
 from vmware_nsx.common import config
 from vmware_nsx import plugin
 from vmware_nsx.plugins.nsx_v.vshield import vcns
@@ -77,7 +76,7 @@ class NsxVPluginWrapper(plugin.NsxVPlugin):
         if not self.count_spawn_jobs():
             return

-        LOG.warning(_LW("Waiting for plugin jobs to finish properly..."))
+        LOG.warning("Waiting for plugin jobs to finish properly...")
         sleep_time = 1
         print_time = 20
         max_loop = 600
@@ -85,17 +84,17 @@ class NsxVPluginWrapper(plugin.NsxVPlugin):
             n_jobs = self.count_spawn_jobs()
             if n_jobs > 0:
                 if (print_index % print_time) == 0:
-                    LOG.warning(_LW("Still Waiting on %(jobs)s "
-                                    "job%(plural)s"),
+                    LOG.warning("Still Waiting on %(jobs)s "
+                                "job%(plural)s",
                                 {'jobs': n_jobs,
                                  'plural': 's' if n_jobs > 1 else ''})
                 time.sleep(sleep_time)
             else:
-                LOG.warning(_LW("Done."))
+                LOG.warning("Done.")
                 return

-        LOG.warning(_LW("Sorry. Waited for too long. Some jobs are still "
-                        "running."))
+        LOG.warning("Sorry. Waited for too long. Some jobs are still "
+                    "running.")


 def get_nsxv_backend_edges():
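A side benefit of keeping the arguments out of the string, relevant to the polling loop above: nothing is formatted for records that are filtered out by the level. A small standalone demonstration:

    import logging

    logging.basicConfig(level=logging.WARNING)
    LOG = logging.getLogger(__name__)

    class Expensive(object):
        def __str__(self):
            print("formatting happened")
            return "result"

    LOG.debug("ignored: %s", Expensive())    # below WARNING: never formatted
    LOG.warning("emitted: %s", Expensive())  # formatted once, when emitted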
@ -15,7 +15,6 @@
|
|||||||
|
|
||||||
import logging
|
import logging
|
||||||
|
|
||||||
from vmware_nsx._i18n import _LI
|
|
||||||
from vmware_nsx.plugins.nsx_v3 import cert_utils
|
from vmware_nsx.plugins.nsx_v3 import cert_utils
|
||||||
from vmware_nsx.shell.admin.plugins.common import constants
|
from vmware_nsx.shell.admin.plugins.common import constants
|
||||||
from vmware_nsx.shell.admin.plugins.common import utils as admin_utils
|
from vmware_nsx.shell.admin.plugins.common import utils as admin_utils
|
||||||
@ -55,7 +54,7 @@ def get_nsx_trust_management(**kwargs):
|
|||||||
|
|
||||||
def get_certificate_manager(**kwargs):
|
def get_certificate_manager(**kwargs):
|
||||||
storage_driver_type = cfg.CONF.nsx_v3.nsx_client_cert_storage.lower()
|
storage_driver_type = cfg.CONF.nsx_v3.nsx_client_cert_storage.lower()
|
||||||
LOG.info(_LI("Certificate storage is %s"), storage_driver_type)
|
LOG.info("Certificate storage is %s", storage_driver_type)
|
||||||
if storage_driver_type == 'nsx-db':
|
if storage_driver_type == 'nsx-db':
|
||||||
storage_driver = cert_utils.DbCertificateStorageDriver(
|
storage_driver = cert_utils.DbCertificateStorageDriver(
|
||||||
context.get_admin_context())
|
context.get_admin_context())
|
||||||
@ -75,8 +74,8 @@ def generate_cert(resource, event, trigger, **kwargs):
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
if cfg.CONF.nsx_v3.nsx_client_cert_storage.lower() == "none":
|
if cfg.CONF.nsx_v3.nsx_client_cert_storage.lower() == "none":
|
||||||
LOG.info(_LI("Generate operation is not supported "
|
LOG.info("Generate operation is not supported "
|
||||||
"with storage type 'none'"))
|
"with storage type 'none'")
|
||||||
return
|
return
|
||||||
|
|
||||||
# update cert defaults based on user input
|
# update cert defaults based on user input
|
||||||
@ -91,7 +90,7 @@ def generate_cert(resource, event, trigger, **kwargs):
|
|||||||
prop = 'valid-days'
|
prop = 'valid-days'
|
||||||
valid_for_days = int(properties.get(prop))
|
valid_for_days = int(properties.get(prop))
|
||||||
except ValueError:
|
except ValueError:
|
||||||
LOG.info(_LI("%s property must be a number"), prop)
|
LOG.info("%s property must be a number", prop)
|
||||||
return
|
return
|
||||||
|
|
||||||
signature_alg = properties.get('sig-alg')
|
signature_alg = properties.get('sig-alg')
|
||||||
@ -105,7 +104,7 @@ def generate_cert(resource, event, trigger, **kwargs):
|
|||||||
|
|
||||||
with get_certificate_manager(**kwargs) as cert:
|
with get_certificate_manager(**kwargs) as cert:
|
||||||
if cert.exists():
|
if cert.exists():
|
||||||
LOG.info(_LI("Deleting existing certificate"))
|
LOG.info("Deleting existing certificate")
|
||||||
# Need to delete cert first
|
# Need to delete cert first
|
||||||
cert.delete()
|
cert.delete()
|
||||||
|
|
||||||
@ -115,7 +114,7 @@ def generate_cert(resource, event, trigger, **kwargs):
|
|||||||
LOG.info(e)
|
LOG.info(e)
|
||||||
return
|
return
|
||||||
|
|
||||||
LOG.info(_LI("Client certificate generated succesfully"))
|
LOG.info("Client certificate generated succesfully")
|
||||||
|
|
||||||
|
|
||||||
@admin_utils.output_header
|
@admin_utils.output_header
|
||||||
@ -126,17 +125,17 @@ def delete_cert(resource, event, trigger, **kwargs):
|
|||||||
if cfg.CONF.nsx_v3.nsx_client_cert_storage.lower() == "none":
|
if cfg.CONF.nsx_v3.nsx_client_cert_storage.lower() == "none":
|
||||||
filename = get_cert_filename(**kwargs)
|
filename = get_cert_filename(**kwargs)
|
||||||
if not filename:
|
if not filename:
|
||||||
LOG.info(_LI("Please specify file containing the certificate "
|
LOG.info("Please specify file containing the certificate "
|
||||||
"using filename property"))
|
"using filename property")
|
||||||
return
|
return
|
||||||
cert.delete_pem(filename)
|
cert.delete_pem(filename)
|
||||||
else:
|
else:
|
||||||
if not cert.exists():
|
if not cert.exists():
|
||||||
LOG.info(_LI("Nothing to clean"))
|
LOG.info("Nothing to clean")
|
||||||
return
|
return
|
||||||
|
|
||||||
cert.delete()
|
cert.delete()
|
||||||
LOG.info(_LI("Client certificate deleted succesfully"))
|
LOG.info("Client certificate deleted succesfully")
|
||||||
|
|
||||||
|
|
||||||
@admin_utils.output_header
|
@admin_utils.output_header
|
||||||
@@ -152,24 +151,24 @@ def show_cert(resource, event, trigger, **kwargs):
         cert_data['alg'] = cert.get_signature_alg()
         cert_data['key_size'] = cert.get_key_size()
         if expires_in_days >= 0:
-            LOG.info(_LI("Client certificate is valid. "
-                         "Expires on %(date)s UTC (in %(days)d days)."),
+            LOG.info("Client certificate is valid. "
+                     "Expires on %(date)s UTC (in %(days)d days).",
                      {'date': expires_on,
                       'days': expires_in_days})

         else:
-            LOG.info(_LI("Client certificate expired on %s."), expires_on)
+            LOG.info("Client certificate expired on %s.", expires_on)

-        LOG.info(_LI("Key Size %(key_size)s, "
+        LOG.info("Key Size %(key_size)s, "
                  "Signature Algorithm %(alg)s\n"
                  "Subject: Country %(country)s, State %(state)s, "
                  "Organization %(organization)s, Unit %(unit)s, "
-                 "Common Name %(hostname)s"), cert_data)
+                 "Common Name %(hostname)s", cert_data)

         LOG.info(cert_pem)
     else:
-        LOG.info(_LI("Client certificate is not registered "
-                     "in storage"))
+        LOG.info("Client certificate is not registered "
+                 "in storage")


 def get_cert_filename(**kwargs):
@@ -179,8 +178,8 @@ def get_cert_filename(**kwargs):
     filename = properties.get('filename', filename)

     if not filename:
-        LOG.info(_LI("Please specify file containing the certificate "
-                     "using filename property"))
+        LOG.info("Please specify file containing the certificate "
+                 "using filename property")
     return filename


@@ -189,13 +188,13 @@ def import_cert(resource, event, trigger, **kwargs):
     """Import client certificate that was generated externally"""

     if cfg.CONF.nsx_v3.nsx_client_cert_storage.lower() != "none":
-        LOG.info(_LI("Import operation is supported "
-                     "with storage type 'none' only"))
+        LOG.info("Import operation is supported "
+                 "with storage type 'none' only")
         return

     with get_certificate_manager(**kwargs) as cert:
         if cert.exists():
-            LOG.info(_LI("Deleting existing certificate"))
+            LOG.info("Deleting existing certificate")
             cert.delete()

         filename = get_cert_filename(**kwargs)
@@ -203,7 +202,7 @@ def import_cert(resource, event, trigger, **kwargs):
             return
         cert.import_pem(filename)

-    LOG.info(_LI("Client certificate imported succesfully"))
+    LOG.info("Client certificate imported succesfully")


 @admin_utils.output_header
@@ -213,11 +212,11 @@ def show_nsx_certs(resource, event, trigger, **kwargs):

     ids = nsx_trust.get_identities(cert_utils.NSX_OPENSTACK_IDENTITY)
     if not ids:
-        LOG.info(_LI("Principal identity %s not found"),
+        LOG.info("Principal identity %s not found",
                  cert_utils.NSX_OPENSTACK_IDENTITY)
         return

-    LOG.info(_LI("Certificate(s) associated with principal identity %s\n"),
+    LOG.info("Certificate(s) associated with principal identity %s\n",
             cert_utils.NSX_OPENSTACK_IDENTITY)

     cert = None
@@ -228,7 +227,7 @@ def show_nsx_certs(resource, event, trigger, **kwargs):
         LOG.info(cert['pem_encoded'])

     if not cert:
-        LOG.info(_LI("No certificates found"))
+        LOG.info("No certificates found")


 registry.subscribe(generate_cert,
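The hunks above show the core pattern repeated throughout this change: the `_LI`/`_LW`/`_LE` marker is dropped and the bare string is passed straight to the logger, while substitution values stay as separate arguments so interpolation remains lazy. A minimal standalone sketch of the before/after (plain stdlib logging here; oslo_log exposes the same interpolation behavior):

    import logging

    LOG = logging.getLogger(__name__)
    logging.basicConfig(level=logging.INFO)

    expires_on = "2018-01-01 00:00:00"  # made-up value for illustration

    # Before: LOG.info(_LI("Client certificate expired on %s."), expires_on)
    # After: the plain string goes through; %s is still expanded lazily.
    LOG.info("Client certificate expired on %s.", expires_on)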
@@ -15,7 +15,6 @@
 from neutron.callbacks import registry
 from oslo_log import log as logging

-from vmware_nsx._i18n import _LE, _LI
 from vmware_nsx.shell.admin.plugins.common import constants
 from vmware_nsx.shell.admin.plugins.common import utils as admin_utils
 from vmware_nsx.shell.admin.plugins.nsxv3.resources import utils
@@ -30,9 +29,9 @@ def validate_configuration(resource, event, trigger, **kwargs):
     try:
         utils.NsxV3PluginWrapper()
     except Exception as e:
-        LOG.error(_LE("Configuration validation failed: %s"), e)
+        LOG.error("Configuration validation failed: %s", e)
     else:
-        LOG.info(_LI("Configuration validation succeeded"))
+        LOG.info("Configuration validation succeeded")


 registry.subscribe(validate_configuration,


@@ -19,7 +19,6 @@ from neutron_lib import constants as const
 from oslo_config import cfg
 from oslo_log import log as logging

-from vmware_nsx._i18n import _LE, _LI
 from vmware_nsx.common import utils as nsx_utils
 from vmware_nsx.shell.admin.plugins.common import constants
 from vmware_nsx.shell.admin.plugins.common import formatters
@@ -50,7 +49,7 @@ def nsx_update_dhcp_bindings(resource, event, trigger, **kwargs):

     nsx_version = nsxlib.get_version()
     if not nsx_utils.is_nsx_version_1_1_0(nsx_version):
-        LOG.error(_LE("This utility is not available for NSX version %s"),
+        LOG.error("This utility is not available for NSX version %s",
                   nsx_version)
         return

@@ -60,7 +59,7 @@ def nsx_update_dhcp_bindings(resource, event, trigger, **kwargs):
     properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
     dhcp_profile_uuid = properties.get('dhcp_profile_uuid')
     if not dhcp_profile_uuid:
-        LOG.error(_LE("dhcp_profile_uuid is not defined"))
+        LOG.error("dhcp_profile_uuid is not defined")
         return

     cfg.CONF.set_override('dhcp_agent_notification', False)
@@ -98,8 +97,8 @@ def nsx_update_dhcp_bindings(resource, event, trigger, **kwargs):
                 network, subnet, port, net_tags)
             server_data['dhcp_profile_id'] = dhcp_profile_uuid
             dhcp_server = dhcp_server_resource.create(**server_data)
-            LOG.info(_LI("Created logical DHCP server %(server)s for "
-                         "network %(network)s"),
+            LOG.info("Created logical DHCP server %(server)s for "
+                     "network %(network)s",
                      {'server': dhcp_server['id'],
                       'network': port['network_id']})
             # Add DHCP service binding in neutron DB.
@@ -112,8 +111,8 @@ def nsx_update_dhcp_bindings(resource, event, trigger, **kwargs):
                 lport_id, dhcp_server['id'],
                 attachment_type=nsx_constants.ATTACHMENT_DHCP)
             server_bindings[lswitch_id] = dhcp_server['id']
-            LOG.info(_LI("Updated DHCP logical port %(port)s for "
-                         "network %(network)s"),
+            LOG.info("Updated DHCP logical port %(port)s for "
+                     "network %(network)s",
                      {'port': lport_id, 'network': port['network_id']})
         elif subnet['enable_dhcp']:
             # Store (mac, ip) binding of each compute port in a
@@ -144,8 +143,8 @@ def nsx_update_dhcp_bindings(resource, event, trigger, **kwargs):
             # Add DHCP static binding in neutron DB.
             neutron_client.add_dhcp_static_binding(
                 port_id, subnet_id, ip, dhcp_server_id, binding['id'])
-            LOG.info(_LI("Added DHCP binding (mac: %(mac)s, ip: %(ip)s) "
-                         "for neutron port %(port)s"),
+            LOG.info("Added DHCP binding (mac: %(mac)s, ip: %(ip)s) "
+                     "for neutron port %(port)s",
                      {'mac': mac, 'ip': ip, 'port': port_id})


@@ -17,7 +17,6 @@ from neutron_lib import context
 from oslo_config import cfg
 from oslo_log import log as logging

-from vmware_nsx._i18n import _LE, _LI
 from vmware_nsx.common import utils as nsx_utils
 from vmware_nsx.db import db as nsx_db
 from vmware_nsx.shell.admin.plugins.common import constants
@@ -98,13 +97,13 @@ def nsx_list_orphaned_dhcp_servers(resource, event, trigger, **kwargs):

     nsx_version = nsxlib.get_version()
     if not nsx_utils.is_nsx_version_1_1_0(nsx_version):
-        LOG.error(_LE("This utility is not available for NSX version %s"),
+        LOG.error("This utility is not available for NSX version %s",
                   nsx_version)
         return

     dhcp_profile_uuid = _get_dhcp_profile_uuid(**kwargs)
     if not dhcp_profile_uuid:
-        LOG.error(_LE("dhcp_profile_uuid is not defined"))
+        LOG.error("dhcp_profile_uuid is not defined")
         return

     orphaned_servers = _get_orphaned_dhcp_servers(dhcp_profile_uuid)
@@ -124,13 +123,13 @@ def nsx_clean_orphaned_dhcp_servers(resource, event, trigger, **kwargs):

     nsx_version = nsxlib.get_version()
     if not nsx_utils.is_nsx_version_1_1_0(nsx_version):
-        LOG.error(_LE("This utility is not available for NSX version %s"),
+        LOG.error("This utility is not available for NSX version %s",
                   nsx_version)
         return

     dhcp_profile_uuid = _get_dhcp_profile_uuid(**kwargs)
     if not dhcp_profile_uuid:
-        LOG.error(_LE("dhcp_profile_uuid is not defined"))
+        LOG.error("dhcp_profile_uuid is not defined")
         return

     cfg.CONF.set_override('dhcp_agent_notification', False)
@@ -153,10 +152,10 @@ def nsx_clean_orphaned_dhcp_servers(resource, event, trigger, **kwargs):
             nsx_db.delete_neutron_nsx_service_binding(
                 context.get_admin_context().session, net_id,
                 nsx_constants.SERVICE_DHCP)
-            LOG.info(_LI("Removed orphaned DHCP server %s"), server['id'])
+            LOG.info("Removed orphaned DHCP server %s", server['id'])
         except Exception as e:
-            LOG.error(_LE("Failed to clean orphaned DHCP server %(id)s. "
-                          "Exception: %(e)s"), {'id': server['id'], 'e': e})
+            LOG.error("Failed to clean orphaned DHCP server %(id)s. "
+                      "Exception: %(e)s", {'id': server['id'], 'e': e})


 registry.subscribe(nsx_list_orphaned_dhcp_servers,
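The cleanup hunk above keeps the same shape inside its try/except: a plain format string with named placeholders plus a single mapping argument. A self-contained sketch of that error path (the server dict and the raised error are stand-ins, not values from the source):

    import logging

    LOG = logging.getLogger(__name__)
    logging.basicConfig(level=logging.ERROR)

    server = {'id': 'dhcp-server-1'}  # hypothetical record
    try:
        raise RuntimeError("backend unreachable")  # stand-in failure
    except Exception as e:
        # A single dict argument serves as the mapping for %(name)s
        # placeholders and is only interpolated when the record is emitted.
        LOG.error("Failed to clean orphaned DHCP server %(id)s. "
                  "Exception: %(e)s", {'id': server['id'], 'e': e})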
@@ -17,7 +17,6 @@ from neutron_lib import constants as const
 from oslo_config import cfg
 from oslo_log import log as logging

-from vmware_nsx._i18n import _LE, _LI
 from vmware_nsx.common import utils as nsx_utils
 from vmware_nsx.dhcp_meta import rpc as nsx_rpc
 from vmware_nsx.shell.admin.plugins.common import constants
@@ -60,7 +59,7 @@ def nsx_update_metadata_proxy(resource, event, trigger, **kwargs):

     nsx_version = nsxlib.get_version()
     if not nsx_utils.is_nsx_version_1_1_0(nsx_version):
-        LOG.error(_LE("This utility is not available for NSX version %s"),
+        LOG.error("This utility is not available for NSX version %s",
                   nsx_version)
         return

@@ -69,7 +68,7 @@ def nsx_update_metadata_proxy(resource, event, trigger, **kwargs):
     properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
     metadata_proxy_uuid = properties.get('metadata_proxy_uuid')
     if not metadata_proxy_uuid:
-        LOG.error(_LE("metadata_proxy_uuid is not defined"))
+        LOG.error("metadata_proxy_uuid is not defined")
         return

     cfg.CONF.set_override('dhcp_agent_notification', False)
@@ -97,9 +96,9 @@ def nsx_update_metadata_proxy(resource, event, trigger, **kwargs):
             router_id = ports[0]['device_id']
             interface = {'subnet_id': network['subnets'][0]}
             plugin.remove_router_interface(router_id, interface)
-            LOG.info(_LI("Removed metadata interface on router %s"), router_id)
+            LOG.info("Removed metadata interface on router %s", router_id)
             plugin.delete_network(network['id'])
-            LOG.info(_LI("Removed metadata network %s"), network['id'])
+            LOG.info("Removed metadata network %s", network['id'])
         else:
             lswitch_id = neutron_client.net_id_to_lswitch_id(network['id'])
             if not lswitch_id:
@@ -112,7 +111,7 @@ def nsx_update_metadata_proxy(resource, event, trigger, **kwargs):
             port_resource.create(
                 lswitch_id, metadata_proxy_uuid, tags=tags, name=name,
                 attachment_type=nsx_constants.ATTACHMENT_MDPROXY)
-            LOG.info(_LI("Enabled native metadata proxy for network %s"),
+            LOG.info("Enabled native metadata proxy for network %s",
                      network['id'])


@@ -13,7 +13,6 @@
 # under the License.


-from vmware_nsx._i18n import _LI
 from vmware_nsx.db import db as nsx_db
 from vmware_nsx.shell.admin.plugins.common import constants
 from vmware_nsx.shell.admin.plugins.common import formatters
@@ -60,13 +59,13 @@ def list_missing_networks(resource, event, trigger, **kwargs):
                              'neutron_id': neutron_id,
                              'nsx_id': nsx_id})
     if len(networks) > 0:
-        title = _LI("Found %d internal networks missing from the NSX "
+        title = ("Found %d internal networks missing from the NSX "
                     "manager:") % len(networks)
         LOG.info(formatters.output_formatter(
             title, networks,
             ['name', 'neutron_id', 'nsx_id']))
     else:
-        LOG.info(_LI("All internal networks exist on the NSX manager"))
+        LOG.info("All internal networks exist on the NSX manager")


 registry.subscribe(list_missing_networks,
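Note the different treatment of `title` in the hunk above: the string is consumed as data by `formatters.output_formatter` rather than handed to the logger, so the eager `%` interpolation stays and only the `_LI()` wrapper is removed. A sketch of the distinction (the `networks` list is made up, and `print` stands in for the formatter call):

    import logging

    LOG = logging.getLogger(__name__)

    networks = [{'name': 'net-a', 'neutron_id': 'n-1', 'nsx_id': 'x-1'}]

    # Used as data: interpolate eagerly before passing the string along.
    title = ("Found %d internal networks missing from the NSX "
             "manager:") % len(networks)
    print(title)  # stands in for LOG.info(formatters.output_formatter(...))

    # Handed to the logger directly: no eager formatting needed.
    LOG.info("All internal networks exist on the NSX manager")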
@@ -17,7 +17,6 @@ from oslo_config import cfg
 from oslo_log import log as logging
 from sqlalchemy.orm import exc

-from vmware_nsx._i18n import _LE, _LI, _LW
 from vmware_nsx.db import db as nsx_db
 from vmware_nsx.db import nsx_models
 from vmware_nsx.dvs import dvs
@@ -90,7 +89,7 @@ def get_dhcp_profile_id(profile_client):
         plugin.NSX_V3_DHCP_PROFILE_NAME)
     if profiles and len(profiles) == 1:
         return profiles[0]['id']
-    LOG.warning(_LW("Could not find DHCP profile on backend"))
+    LOG.warning("Could not find DHCP profile on backend")


 def get_spoofguard_profile_id(profile_client):
@@ -98,12 +97,12 @@ def get_spoofguard_profile_id(profile_client):
         plugin.NSX_V3_PSEC_PROFILE_NAME)
     if profiles and len(profiles) == 1:
         return profiles[0]['id']
-    LOG.warning(_LW("Could not find Spoof Guard profile on backend"))
+    LOG.warning("Could not find Spoof Guard profile on backend")


 def add_profile_mismatch(problems, neutron_id, nsx_id, prf_id, title):
-    msg = (_LI('Wrong %(title)s profile %(prf_id)s') % {'title': title,
-                                                        'prf_id': prf_id})
+    msg = ('Wrong %(title)s profile %(prf_id)s') % {'title': title,
+                                                    'prf_id': prf_id}
     problems.append({'neutron_id': neutron_id,
                      'nsx_id': nsx_id,
                      'error': msg})
@@ -141,7 +140,7 @@ def list_missing_ports(resource, event, trigger, **kwargs):
         except nsx_exc.ResourceNotFound:
             problems.append({'neutron_id': neutron_id,
                              'nsx_id': nsx_id,
-                             'error': _LI('Missing from backend')})
+                             'error': 'Missing from backend'})
             continue

         # Port found on backend!
@@ -184,13 +183,13 @@ def list_missing_ports(resource, event, trigger, **kwargs):
                                  prf_id, "Spoof Guard")

     if len(problems) > 0:
-        title = _LI("Found internal ports misconfiguration on the "
+        title = ("Found internal ports misconfiguration on the "
                     "NSX manager:")
         LOG.info(formatters.output_formatter(
             title, problems,
             ['neutron_id', 'nsx_id', 'error']))
     else:
-        LOG.info(_LI("All internal ports verified on the NSX manager"))
+        LOG.info("All internal ports verified on the NSX manager")


 def get_vm_network_device(vm_mng, vm_moref, mac_address):
@@ -218,8 +217,8 @@ def migrate_compute_ports_vms(resource, event, trigger, **kwargs):
     try:
         vm_mng = dvs.VMManager()
     except Exception as e:
-        LOG.error(_LE("Cannot connect to the DVS: Please update the [dvs] "
-                      "section in the nsx.ini file: %s"), e)
+        LOG.error("Cannot connect to the DVS: Please update the [dvs] "
+                  "section in the nsx.ini file: %s", e)
         return

     # Go over all the compute ports from the plugin
@@ -235,7 +234,7 @@ def migrate_compute_ports_vms(resource, event, trigger, **kwargs):
         vm_moref = vm_mng.get_vm_moref_obj(device_id)
         vm_spec = vm_mng.get_vm_spec(vm_moref)
         if not vm_spec:
-            LOG.error(_LE("Failed to get the spec of vm %s"), device_id)
+            LOG.error("Failed to get the spec of vm %s", device_id)
             continue

         # Go over the VM interfaces and check if it should be updated
@@ -248,22 +247,22 @@ def migrate_compute_ports_vms(resource, event, trigger, **kwargs):
                 update_spec = True

         if not update_spec:
-            LOG.info(_LI("No need to update the spec of vm %s"), device_id)
+            LOG.info("No need to update the spec of vm %s", device_id)
             continue

         # find the old interface by it's mac and delete it
         device = get_vm_network_device(vm_mng, vm_moref, port['mac_address'])
         if device is None:
-            LOG.warning(_LW("No device with MAC address %s exists on the VM"),
+            LOG.warning("No device with MAC address %s exists on the VM",
                         port['mac_address'])
             continue
         device_type = device.__class__.__name__

-        LOG.info(_LI("Detaching old interface from VM %s"), device_id)
+        LOG.info("Detaching old interface from VM %s", device_id)
         vm_mng.detach_vm_interface(vm_moref, device)

         # add the new interface as OpaqueNetwork
-        LOG.info(_LI("Attaching new interface to VM %s"), device_id)
+        LOG.info("Attaching new interface to VM %s", device_id)
         nsx_net_id = get_network_nsx_id(admin_cxt.session, port['network_id'])
         vm_mng.attach_vm_interface(vm_moref, port['id'], port['mac_address'],
                                    nsx_net_id, device_type)


@@ -13,7 +13,6 @@
 # under the License.


-from vmware_nsx._i18n import _LI
 from vmware_nsx.db import db as nsx_db
 from vmware_nsx.shell.admin.plugins.common import constants
 from vmware_nsx.shell.admin.plugins.common import formatters
@@ -68,13 +67,13 @@ def list_missing_routers(resource, event, trigger, **kwargs):
                             'neutron_id': neutron_id,
                             'nsx_id': nsx_id})
     if len(routers) > 0:
-        title = _LI("Found %d routers missing from the NSX "
+        title = ("Found %d routers missing from the NSX "
                     "manager:") % len(routers)
         LOG.info(formatters.output_formatter(
             title, routers,
             ['name', 'neutron_id', 'nsx_id']))
     else:
-        LOG.info(_LI("All routers exist on the NSX manager"))
+        LOG.info("All routers exist on the NSX manager")


 registry.subscribe(list_missing_routers,


@@ -30,7 +30,6 @@ from vmware_nsx.shell.admin.plugins.common import utils as admin_utils
 from vmware_nsx.shell.admin.plugins.nsxv3.resources import ports
 from vmware_nsx.shell.admin.plugins.nsxv3.resources import utils as v3_utils
 from vmware_nsx.shell import resources as shell
-from vmware_nsx._i18n import _LE, _LW
 from vmware_nsxlib.v3 import nsx_constants as consts
 from vmware_nsxlib.v3 import security

@@ -271,17 +270,17 @@ def _update_security_group_dynamic_criteria():
                 membership_criteria=membership_criteria,
                 members=[])
         except Exception as e:
-            LOG.warning(_LW("Failed to update membership criteria for nsgroup "
+            LOG.warning("Failed to update membership criteria for nsgroup "
                         "%(nsgroup_id)s, request to backend returned "
-                        "with error: %(error)s"),
+                        "with error: %(error)s",
                         {'nsgroup_id': nsgroup_id, 'error': str(e)})


 @admin_utils.output_header
 def migrate_nsgroups_to_dynamic_criteria(resource, event, trigger, **kwargs):
     if not utils.is_nsx_version_1_1_0(nsxlib.get_version()):
-        LOG.error(_LE("Dynamic criteria grouping feature isn't supported by "
-                      "this NSX version."))
+        LOG.error("Dynamic criteria grouping feature isn't supported by "
+                  "this NSX version.")
         return
     # First, we add the criteria tags for all ports.
     _update_ports_dynamic_criteria_tags()


@@ -31,7 +31,6 @@ from neutron.callbacks import registry
 from neutron.common import config as neutron_config
 from neutron.conf import common as neutron_common_config

-from vmware_nsx._i18n import _LE, _LI
 from vmware_nsx.common import config  # noqa

 from oslo_config import cfg
@@ -74,11 +73,11 @@ def _init_cfg():

 def _validate_resource_choice(resource, nsx_plugin):
     if nsx_plugin == 'nsxv' and resource not in resources.nsxv_resources:
-        LOG.error(_LE('Supported list of NSX-V resources: %s'),
+        LOG.error('Supported list of NSX-V resources: %s',
                   resources.nsxv_resources_names)
         sys.exit(1)
     elif nsx_plugin == 'nsxv3'and resource not in resources.nsxv3_resources:
-        LOG.error(_LE('Supported list of NSX-V3 resources: %s'),
+        LOG.error('Supported list of NSX-V3 resources: %s',
                   resources.nsxv3_resources_names)
         sys.exit(1)

@@ -88,16 +87,16 @@ def _validate_op_choice(choice, nsx_plugin):
         supported_resource_ops = \
             resources.nsxv_resources[cfg.CONF.resource].supported_ops
         if choice not in supported_resource_ops:
-            LOG.error(_LE('Supported list of operations for the NSX-V '
-                          'resource %s'), supported_resource_ops)
+            LOG.error('Supported list of operations for the NSX-V '
+                      'resource %s', supported_resource_ops)
             sys.exit(1)

     elif nsx_plugin == 'nsxv3':
         supported_resource_ops = \
             resources.nsxv3_resources[cfg.CONF.resource].supported_ops
         if choice not in supported_resource_ops:
-            LOG.error(_LE('Supported list of operations for the NSX-V3 '
-                          'resource %s'), supported_resource_ops)
+            LOG.error('Supported list of operations for the NSX-V3 '
+                      'resource %s', supported_resource_ops)
             sys.exit(1)


@@ -107,7 +106,7 @@ def main(argv=sys.argv[1:]):
     resources.init_resource_plugin(
         nsx_plugin_in_use,
         resources.get_plugin_dir(nsx_plugin_in_use))
-    LOG.info(_LI('NSX Plugin in use: %s'), nsx_plugin_in_use)
+    LOG.info('NSX Plugin in use: %s', nsx_plugin_in_use)

     _validate_resource_choice(cfg.CONF.resource, nsx_plugin_in_use)
     _validate_op_choice(cfg.CONF.operation, nsx_plugin_in_use)


@@ -21,7 +21,6 @@ from neutron.tests import base
 from oslo_log import log as logging
 from six.moves import http_client as httplib

-from vmware_nsx._i18n import _LI
 from vmware_nsx.api_client import (
     eventlet_client as client)
 from vmware_nsx.api_client import (
@@ -60,7 +59,7 @@ class ApiRequestEventletTest(base.BaseTestCase):
     def test_apirequest_spawn(self):
         def x(id):
             eventlet.greenthread.sleep(random.random())
-            LOG.info(_LI('spawned: %d'), id)
+            LOG.info('spawned: %d', id)

         for i in range(10):
             request.EventletApiRequest._spawn(x, i)
@@ -72,7 +71,7 @@ class ApiRequestEventletTest(base.BaseTestCase):
         a._handle_request = mock.Mock()
         a.start()
         eventlet.greenthread.sleep(0.1)
-        LOG.info(_LI('_handle_request called: %s'),
+        LOG.info('_handle_request called: %s',
                  a._handle_request.called)
         request.EventletApiRequest.joinall()

@@ -107,8 +106,8 @@ class ApiRequestEventletTest(base.BaseTestCase):

     def test_run_and_timeout(self):
         def my_handle_request():
-            LOG.info(_LI('my_handle_request() self: %s'), self.req)
-            LOG.info(_LI('my_handle_request() dir(self): %s'), dir(self.req))
+            LOG.info('my_handle_request() self: %s', self.req)
+            LOG.info('my_handle_request() dir(self): %s', dir(self.req))
             eventlet.greenthread.sleep(REQUEST_TIMEOUT * 2)

         with mock.patch.object(
@@ -330,5 +329,5 @@ class ApiRequestEventletTest(base.BaseTestCase):
             { "role": "api_provider",
               "listen_addr": "pssl:1.1.1.1:1" }]}]}"""
         r.successful = mock.Mock(return_value=True)
-        LOG.info(_LI('%s'), r.api_providers())
+        LOG.info('%s', r.api_providers())
         self.assertIsNotNone(r.api_providers())


@@ -27,16 +27,6 @@ _C = _translators.contextual_form
 # The plural translation function using the name "_P"
 _P = _translators.plural_form

-# Translators for log levels.
-#
-# The abbreviated names are meant to reflect the usual use of a short
-# name like '_'. The "L" is for "log" and the other letter comes from
-# the level.
-_LI = _translators.log_info
-_LW = _translators.log_warning
-_LE = _translators.log_error
-_LC = _translators.log_critical
-

 def get_available_languages():
     return oslo_i18n.get_available_languages(DOMAIN)
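After this hunk the `_i18n` module keeps only the primary, contextual, and plural translators; the per-level log markers are gone entirely. A sketch of the surviving module surface, using oslo.i18n's TranslatorFactory (the DOMAIN value is an assumption, since the hunk does not show that constant):

    import oslo_i18n

    DOMAIN = "vmware-nsx-tempest"  # assumed; not visible in the hunk
    _translators = oslo_i18n.TranslatorFactory(domain=DOMAIN)

    # Only the primary, contextual, and plural forms remain after the change;
    # the _LI/_LW/_LE/_LC log-level markers have been removed.
    _ = _translators.primary
    _C = _translators.contextual_form
    _P = _translators.plural_form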
@@ -21,8 +21,6 @@ from tempest import config
 from tempest import test

 from vmware_nsx_tempest._i18n import _
-from vmware_nsx_tempest._i18n import _LI
-from vmware_nsx_tempest._i18n import _LW
 from vmware_nsx_tempest.common import constants
 from vmware_nsx_tempest.services import l2_gateway_client
 from vmware_nsx_tempest.services import l2_gateway_connection_client
@@ -140,7 +138,7 @@ class BaseL2GatewayTest(base.BaseAdminNetworkTest):
             _params = manager.default_params_withy_timeout_values.copy()
         except AttributeError as attribute_err:
             LOG.warning(
-                _LW("Failed to locate the attribute, Error: %(err_msg)s") %
+                "Failed to locate the attribute, Error: %(err_msg)s",
                 {"err_msg": attribute_err.__str__()})
             _params = {}
         cls.l2gw_client = l2_gateway_client.L2GatewayClient(
@@ -191,7 +189,7 @@ class BaseL2GatewayTest(base.BaseAdminNetworkTest):

         :return: response of L2GW create API
         """
-        LOG.info(_LI("l2gw name: %(name)s, l2gw_param: %(devices)s ") %
+        LOG.info("l2gw name: %(name)s, l2gw_param: %(devices)s ",
                  {"name": l2gw_name, "devices": l2gw_param})
         devices = []
         for device_dict in l2gw_param:
@@ -203,10 +201,10 @@ class BaseL2GatewayTest(base.BaseAdminNetworkTest):
                       "interfaces": interface}
             devices.append(device)
         l2gw_request_body = {"devices": devices}
-        LOG.info(_LI(" l2gw_request_body: %s") % l2gw_request_body)
+        LOG.info(" l2gw_request_body: %s", l2gw_request_body)
         rsp = self.l2gw_client.create_l2_gateway(
             name=l2gw_name, **l2gw_request_body)
-        LOG.info(_LI(" l2gw response: %s") % rsp)
+        LOG.info(" l2gw response: %s", rsp)
         self.l2gw_created[rsp[constants.L2GW]["id"]] = rsp[constants.L2GW]
         return rsp, devices

@@ -218,9 +216,9 @@ class BaseL2GatewayTest(base.BaseAdminNetworkTest):

         :return: response of the l2gw delete API.
         """
-        LOG.info(_LI("L2GW id: %(id)s to be deleted.") % {"id": l2gw_id})
+        LOG.info("L2GW id: %(id)s to be deleted.", {"id": l2gw_id})
         rsp = self.l2gw_client.delete_l2_gateway(l2gw_id)
-        LOG.info(_LI("response : %(rsp)s") % {"rsp": rsp})
+        LOG.info("response : %(rsp)s", {"rsp": rsp})
         return rsp

     def update_l2gw(self, l2gw_id, l2gw_new_name, devices):
@@ -256,16 +254,16 @@ class BaseL2GatewayTest(base.BaseAdminNetworkTest):

         :return: response of L2GWC create API.
         """
-        LOG.info(_LI("l2gwc param: %(param)s ") % {"param": l2gwc_param})
+        LOG.info("l2gwc param: %(param)s ", {"param": l2gwc_param})
         l2gwc_request_body = {"l2_gateway_id": l2gwc_param["l2_gateway_id"],
                               "network_id": l2gwc_param["network_id"]}
         if "segmentation_id" in l2gwc_param:
             l2gwc_request_body["segmentation_id"] = l2gwc_param[
                 "segmentation_id"]
-        LOG.info(_LI("l2gwc_request_body: %s") % l2gwc_request_body)
+        LOG.info("l2gwc_request_body: %s", l2gwc_request_body)
         rsp = self.l2gwc_client.create_l2_gateway_connection(
             **l2gwc_request_body)
-        LOG.info(_LI("l2gwc response: %s") % rsp)
+        LOG.info("l2gwc response: %s", rsp)
         self.l2gwc_created[rsp[constants.L2GWC]["id"]] = rsp[constants.L2GWC]
         return rsp

@@ -277,8 +275,8 @@ class BaseL2GatewayTest(base.BaseAdminNetworkTest):

         :return: response of the l2gwc delete API.
         """
-        LOG.info(_LI("L2GW connection id: %(id)s to be deleted")
-                 % {"id": l2gwc_id})
+        LOG.info("L2GW connection id: %(id)s to be deleted",
+                 {"id": l2gwc_id})
         rsp = self.l2gwc_client.delete_l2_gateway_connection(l2gwc_id)
-        LOG.info(_LI("response : %(rsp)s") % {"rsp": rsp})
+        LOG.info("response : %(rsp)s", {"rsp": rsp})
         return rsp
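Besides dropping `_LI`/`_LW`, the tempest hunks above also replace eager `%` interpolation at the call site with logger arguments. The difference shows when a level is filtered out: with `%` the string is always built, while a trailing mapping argument is only interpolated if the record is actually emitted. A small sketch:

    import logging

    LOG = logging.getLogger(__name__)
    logging.basicConfig(level=logging.WARNING)  # INFO records are filtered

    rsp = {"id": "l2gw-1"}  # hypothetical response payload

    # Old style: the message is formatted even though nothing gets logged.
    LOG.info("response : %(rsp)s" % {"rsp": rsp})

    # New style: the dict rides along on the record and is interpolated
    # only when a handler emits it, so the filtered call is nearly free.
    LOG.info("response : %(rsp)s", {"rsp": rsp})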
|
@ -16,8 +16,6 @@ from oslo_log import log
|
|||||||
|
|
||||||
from tempest.lib.services.network import base
|
from tempest.lib.services.network import base
|
||||||
|
|
||||||
from vmware_nsx_tempest._i18n import _LI
|
|
||||||
from vmware_nsx_tempest._i18n import _LW
|
|
||||||
from vmware_nsx_tempest.common import constants
|
from vmware_nsx_tempest.common import constants
|
||||||
|
|
||||||
LOG = log.getLogger(__name__)
|
LOG = log.getLogger(__name__)
|
||||||
@ -36,31 +34,31 @@ class L2GatewayClient(base.BaseNetworkClient):
|
|||||||
def create_l2_gateway(self, **kwargs):
|
def create_l2_gateway(self, **kwargs):
|
||||||
uri = constants.L2_GWS_BASE_URI
|
uri = constants.L2_GWS_BASE_URI
|
||||||
post_data = {constants.L2GW: kwargs}
|
post_data = {constants.L2GW: kwargs}
|
||||||
LOG.info(_LI("URI : %(uri)s, posting data : %(post_data)s") % {
|
LOG.info("URI : %(uri)s, posting data : %(post_data)s",
|
||||||
"uri": uri, "post_data": post_data})
|
{"uri": uri, "post_data": post_data})
|
||||||
return self.create_resource(uri, post_data)
|
return self.create_resource(uri, post_data)
|
||||||
|
|
||||||
def update_l2_gateway(self, l2_gateway_id, **kwargs):
|
def update_l2_gateway(self, l2_gateway_id, **kwargs):
|
||||||
uri = constants.L2_GWS_BASE_URI + "/" + l2_gateway_id
|
uri = constants.L2_GWS_BASE_URI + "/" + l2_gateway_id
|
||||||
post_data = {constants.L2GW: kwargs}
|
post_data = {constants.L2GW: kwargs}
|
||||||
constants.LOG.info(
|
constants.LOG.info(
|
||||||
_LI("URI : %(uri)s, posting data : %(post_data)s") % {
|
"URI : %(uri)s, posting data : %(post_data)s",
|
||||||
"uri": uri, "post_data": post_data})
|
{"uri": uri, "post_data": post_data})
|
||||||
return self.update_resource(uri, post_data)
|
return self.update_resource(uri, post_data)
|
||||||
|
|
||||||
def show_l2_gateway(self, l2_gateway_id, **fields):
|
def show_l2_gateway(self, l2_gateway_id, **fields):
|
||||||
uri = constants.L2_GWS_BASE_URI + "/" + l2_gateway_id
|
uri = constants.L2_GWS_BASE_URI + "/" + l2_gateway_id
|
||||||
LOG.info(_LI("URI : %(uri)s") % {"uri": uri})
|
LOG.info("URI : %(uri)s", {"uri": uri})
|
||||||
return self.show_resource(uri, **fields)
|
return self.show_resource(uri, **fields)
|
||||||
|
|
||||||
def delete_l2_gateway(self, l2_gateway_id):
|
def delete_l2_gateway(self, l2_gateway_id):
|
||||||
uri = constants.L2_GWS_BASE_URI + "/" + l2_gateway_id
|
uri = constants.L2_GWS_BASE_URI + "/" + l2_gateway_id
|
||||||
LOG.info(_LI("URI : %(uri)s") % {"uri": uri})
|
LOG.info("URI : %(uri)s", {"uri": uri})
|
||||||
return self.delete_resource(uri)
|
return self.delete_resource(uri)
|
||||||
|
|
||||||
def list_l2_gateways(self, **filters):
|
def list_l2_gateways(self, **filters):
|
||||||
uri = constants.L2_GWS_BASE_URI
|
uri = constants.L2_GWS_BASE_URI
|
||||||
LOG.info(_LI("URI : %(uri)s") % {"uri": uri})
|
LOG.info("URI : %(uri)s", {"uri": uri})
|
||||||
return self.list_resources(uri, **filters)
|
return self.list_resources(uri, **filters)
|
||||||
|
|
||||||
|
|
||||||
@ -73,7 +71,7 @@ def get_client(client_mgr):
|
|||||||
net_client = getattr(manager, "networks_client")
|
net_client = getattr(manager, "networks_client")
|
||||||
_params = manager.default_params_withy_timeout_values.copy()
|
_params = manager.default_params_withy_timeout_values.copy()
|
||||||
except AttributeError as attribute_err:
|
except AttributeError as attribute_err:
|
||||||
LOG.warning(_LW("Failed to locate the attribute, Error: %(err_msg)s") %
|
LOG.warning("Failed to locate the attribute, Error: %(err_msg)s",
|
||||||
{"err_msg": attribute_err.__str__()})
|
{"err_msg": attribute_err.__str__()})
|
||||||
_params = {}
|
_params = {}
|
||||||
client = L2GatewayClient(net_client.auth_provider,
|
client = L2GatewayClient(net_client.auth_provider,
|
||||||
|
@ -21,10 +21,6 @@ import six.moves.urllib.parse as urlparse
|
|||||||
from oslo_log import log as logging
|
from oslo_log import log as logging
|
||||||
from oslo_serialization import jsonutils
|
from oslo_serialization import jsonutils
|
||||||
|
|
||||||
from vmware_nsx_tempest._i18n import _LE
|
|
||||||
from vmware_nsx_tempest._i18n import _LI
|
|
||||||
from vmware_nsx_tempest._i18n import _LW
|
|
||||||
|
|
||||||
requests.packages.urllib3.disable_warnings()
|
requests.packages.urllib3.disable_warnings()
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
@ -173,8 +169,8 @@ class NSXV3Client(object):
|
|||||||
Return the logical port if found, otherwise return None.
|
Return the logical port if found, otherwise return None.
|
||||||
"""
|
"""
|
||||||
if not os_name:
|
if not os_name:
|
||||||
LOG.error(_LE("Name of OS port should be present "
|
LOG.error("Name of OS port should be present "
|
||||||
"in order to query backend logical port created"))
|
"in order to query backend logical port created")
|
||||||
return None
|
return None
|
||||||
lports = self.get_logical_ports()
|
lports = self.get_logical_ports()
|
||||||
return self.get_nsx_resource_by_name(lports, os_name)
|
return self.get_nsx_resource_by_name(lports, os_name)
|
||||||
@ -217,7 +213,7 @@ class NSXV3Client(object):
|
|||||||
endpoint = "/logical-ports/%s" % p['id']
|
endpoint = "/logical-ports/%s" % p['id']
|
||||||
response = self.put(endpoint=endpoint, body=p)
|
response = self.put(endpoint=endpoint, body=p)
|
||||||
if response.status_code != requests.codes.ok:
|
if response.status_code != requests.codes.ok:
|
||||||
LOG.error(_LE("Failed to update lport %s"), p['id'])
|
LOG.error("Failed to update lport %s", p['id'])
|
||||||
|
|
||||||
def cleanup_os_logical_ports(self):
|
def cleanup_os_logical_ports(self):
|
||||||
"""
|
"""
|
||||||
@ -225,7 +221,7 @@ class NSXV3Client(object):
|
|||||||
"""
|
"""
|
||||||
lports = self.get_logical_ports()
|
lports = self.get_logical_ports()
|
||||||
os_lports = self.get_os_resources(lports)
|
os_lports = self.get_os_resources(lports)
|
||||||
LOG.info(_LI("Number of OS Logical Ports to be deleted: %s"),
|
LOG.info("Number of OS Logical Ports to be deleted: %s",
|
||||||
len(os_lports))
|
len(os_lports))
|
||||||
# logical port vif detachment
|
# logical port vif detachment
|
||||||
self.update_logical_port_attachment(os_lports)
|
self.update_logical_port_attachment(os_lports)
|
||||||
@ -233,10 +229,10 @@ class NSXV3Client(object):
|
|||||||
endpoint = '/logical-ports/%s' % p['id']
|
endpoint = '/logical-ports/%s' % p['id']
|
||||||
response = self.delete(endpoint=endpoint)
|
response = self.delete(endpoint=endpoint)
|
||||||
if response.status_code == requests.codes.ok:
|
if response.status_code == requests.codes.ok:
|
||||||
LOG.info(_LI("Successfully deleted logical port %s"), p['id'])
|
LOG.info("Successfully deleted logical port %s", p['id'])
|
||||||
else:
|
else:
|
||||||
LOG.error(_LE("Failed to delete lport %(port_id)s, response "
|
LOG.error("Failed to delete lport %(port_id)s, response "
|
||||||
"code %(code)s"),
|
"code %(code)s",
|
||||||
{'port_id': p['id'], 'code': response.status_code})
|
{'port_id': p['id'], 'code': response.status_code})
|
||||||
|
|
||||||
def get_os_resources(self, resources):
|
def get_os_resources(self, resources):
|
||||||
@ -258,14 +254,14 @@ class NSXV3Client(object):
|
|||||||
nsx_resource = [n for n in nsx_resources if
|
nsx_resource = [n for n in nsx_resources if
|
||||||
n['display_name'] == nsx_name]
|
n['display_name'] == nsx_name]
|
||||||
if len(nsx_resource) == 0:
|
if len(nsx_resource) == 0:
|
||||||
LOG.warning(_LW("Backend nsx resource %s NOT found!"), nsx_name)
|
LOG.warning("Backend nsx resource %s NOT found!", nsx_name)
|
||||||
return None
|
return None
|
||||||
if len(nsx_resource) > 1:
|
if len(nsx_resource) > 1:
|
||||||
LOG.error(_LE("More than 1 nsx resources found: %s!"),
|
LOG.error("More than 1 nsx resources found: %s!",
|
||||||
nsx_resource)
|
nsx_resource)
|
||||||
return None
|
return None
|
||||||
else:
|
else:
|
||||||
LOG.info(_LI("Found nsgroup: %s"), nsx_resource[0])
|
LOG.info("Found nsgroup: %s", nsx_resource[0])
|
||||||
return nsx_resource[0]
|
return nsx_resource[0]
|
||||||
|
|
||||||
def get_logical_switches(self):
|
def get_logical_switches(self):
|
||||||
@ -297,8 +293,8 @@ class NSXV3Client(object):
|
|||||||
Return logical switch if found, otherwise return None
|
Return logical switch if found, otherwise return None
|
||||||
"""
|
"""
|
||||||
if not os_name or not os_uuid:
|
if not os_name or not os_uuid:
|
||||||
LOG.error(_LE("Name and uuid of OpenStack L2 network need to be "
|
LOG.error("Name and uuid of OpenStack L2 network need to be "
|
||||||
"present in order to query backend logical switch!"))
|
"present in order to query backend logical switch!")
|
||||||
return None
|
return None
|
||||||
nsx_name = os_name + "_" + os_uuid[:5] + "..." + os_uuid[-5:]
|
nsx_name = os_name + "_" + os_uuid[:5] + "..." + os_uuid[-5:]
|
||||||
lswitches = self.get_logical_switches()
|
lswitches = self.get_logical_switches()
|
||||||
@ -322,9 +318,9 @@ class NSXV3Client(object):
|
|||||||
Get the firewall section by os_name and os_uuid
|
Get the firewall section by os_name and os_uuid
|
||||||
"""
|
"""
|
||||||
if not os_name or not os_uuid:
|
if not os_name or not os_uuid:
|
||||||
LOG.error(_LE("Name and uuid of OS security group should be "
|
LOG.error("Name and uuid of OS security group should be "
|
||||||
"present in order to query backend FW section "
|
"present in order to query backend FW section "
|
||||||
"created"))
|
"created")
|
||||||
return None
|
return None
|
||||||
nsx_name = os_name + " - " + os_uuid
|
nsx_name = os_name + " - " + os_uuid
|
||||||
fw_sections = self.get_firewall_sections()
|
fw_sections = self.get_firewall_sections()
|
||||||
@ -378,8 +374,8 @@ class NSXV3Client(object):
|
|||||||
Return nsgroup if found, otherwise return None
|
Return nsgroup if found, otherwise return None
|
||||||
"""
|
"""
|
||||||
if not os_name or not os_uuid:
|
if not os_name or not os_uuid:
|
||||||
LOG.error(_LE("Name and uuid of OS security group should be "
|
LOG.error("Name and uuid of OS security group should be "
|
||||||
"present in order to query backend nsgroup created"))
|
"present in order to query backend nsgroup created")
|
||||||
return None
|
return None
|
||||||
nsx_name = os_name + " - " + os_uuid
|
nsx_name = os_name + " - " + os_uuid
|
||||||
nsgroups = self.get_ns_groups()
|
nsgroups = self.get_ns_groups()
|
||||||
@ -404,8 +400,8 @@ class NSXV3Client(object):
|
|||||||
Return the logical router if found, otherwise return None.
|
Return the logical router if found, otherwise return None.
|
||||||
"""
|
"""
|
||||||
if not os_name or not os_uuid:
|
if not os_name or not os_uuid:
|
||||||
LOG.error(_LE("Name and uuid of OS router should be present "
|
LOG.error("Name and uuid of OS router should be present "
|
||||||
"in order to query backend logical router created"))
|
"in order to query backend logical router created")
|
||||||
return None
|
return None
|
||||||
nsx_name = os_name + "_" + os_uuid[:5] + "..." + os_uuid[-5:]
|
nsx_name = os_name + "_" + os_uuid[:5] + "..." + os_uuid[-5:]
|
||||||
lrouters = self.get_logical_routers()
|
lrouters = self.get_logical_routers()
|
||||||
@ -423,8 +419,8 @@ class NSXV3Client(object):
|
|||||||
Get all user defined NAT rules of the specific logical router
|
Get all user defined NAT rules of the specific logical router
|
||||||
"""
|
"""
|
||||||
if not lrouter:
|
if not lrouter:
|
||||||
LOG.error(_LE("Logical router needs to be present in order "
|
LOG.error("Logical router needs to be present in order "
|
||||||
"to get the NAT rules"))
|
"to get the NAT rules")
|
||||||
return None
|
return None
|
||||||
endpoint = "/logical-routers/%s/nat/rules" % lrouter['id']
|
endpoint = "/logical-routers/%s/nat/rules" % lrouter['id']
|
||||||
return self.get_logical_resources(endpoint)
|
return self.get_logical_resources(endpoint)
|
||||||
@ -432,8 +428,8 @@ class NSXV3Client(object):
|
|||||||
def get_logical_router_advertisement(self, lrouter):
|
def get_logical_router_advertisement(self, lrouter):
|
||||||
"""Get logical router advertisement"""
|
"""Get logical router advertisement"""
|
||||||
if not lrouter:
|
if not lrouter:
|
||||||
LOG.error(_LE("Logical router needs to be present in order "
|
LOG.error("Logical router needs to be present in order "
|
||||||
"to get router advertisement!"))
|
"to get router advertisement!")
|
||||||
return None
|
return None
|
||||||
endpoint = "/logical-routers/%s/routing/advertisement" % lrouter['id']
|
endpoint = "/logical-routers/%s/routing/advertisement" % lrouter['id']
|
||||||
response = self.get(endpoint)
|
response = self.get(endpoint)
|
||||||
@ -454,9 +450,9 @@ class NSXV3Client(object):
|
|||||||
Return logical dhcp server if found, otherwise return None
|
Return logical dhcp server if found, otherwise return None
|
||||||
"""
|
"""
|
||||||
if not os_name or not os_uuid:
|
if not os_name or not os_uuid:
|
||||||
LOG.error(_LE("Name and uuid of OpenStack L2 network need to be "
|
LOG.error("Name and uuid of OpenStack L2 network need to be "
|
||||||
"present in order to query backend logical dhcp "
|
"present in order to query backend logical dhcp "
|
||||||
"server!"))
|
"server!")
|
||||||
return None
|
return None
|
||||||
nsx_name = os_name + "_" + os_uuid[:5] + "..." + os_uuid[-5:]
|
nsx_name = os_name + "_" + os_uuid[:5] + "..." + os_uuid[-5:]
|
||||||
dhcp_servers = self.get_logical_dhcp_servers()
|
dhcp_servers = self.get_logical_dhcp_servers()
|
||||||
|
@ -26,7 +26,6 @@ from tempest.lib.common.utils import data_utils
|
|||||||
from tempest.lib.common.utils import test_utils
|
from tempest.lib.common.utils import test_utils
|
||||||
from tempest.lib import exceptions
|
from tempest.lib import exceptions
|
||||||
|
|
||||||
from vmware_nsx_tempest._i18n import _LI
|
|
||||||
from vmware_nsx_tempest.services.lbaas import health_monitors_client
|
from vmware_nsx_tempest.services.lbaas import health_monitors_client
|
||||||
from vmware_nsx_tempest.services.lbaas import l7policies_client
|
from vmware_nsx_tempest.services.lbaas import l7policies_client
|
||||||
from vmware_nsx_tempest.services.lbaas import l7rules_client
|
from vmware_nsx_tempest.services.lbaas import l7rules_client
|
||||||
@ -153,12 +152,12 @@ class BaseTestCase(base.BaseNetworkTest):
|
|||||||
super(BaseTestCase, cls).setUpClass()
|
super(BaseTestCase, cls).setUpClass()
|
||||||
|
|
||||||
def setUp(cls):
|
def setUp(cls):
|
||||||
cls.LOG.info(_LI('Starting: {0}').format(cls._testMethodName))
|
cls.LOG.info(('Starting: {0}').format(cls._testMethodName))
|
||||||
super(BaseTestCase, cls).setUp()
|
super(BaseTestCase, cls).setUp()
|
||||||
|
|
||||||
def tearDown(cls):
|
def tearDown(cls):
|
||||||
super(BaseTestCase, cls).tearDown()
|
super(BaseTestCase, cls).tearDown()
|
||||||
cls.LOG.info(_LI('Finished: {0}').format(cls._testMethodName))
|
cls.LOG.info(('Finished: {0}').format(cls._testMethodName))
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def _create_load_balancer(cls, wait=True, **lb_kwargs):
|
def _create_load_balancer(cls, wait=True, **lb_kwargs):
|
||||||
|
@ -21,8 +21,6 @@ from tempest.lib import decorators
|
|||||||
from tempest.lib import exceptions as lib_exc
|
from tempest.lib import exceptions as lib_exc
|
||||||
from tempest import test
|
from tempest import test
|
||||||
|
|
||||||
from vmware_nsx_tempest._i18n import _LI
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
CONF = config.CONF
|
CONF = config.CONF
|
||||||
@ -261,5 +259,5 @@ class SubnetPoolsTestJSON(base.BaseNetworkTest):
|
|||||||
self.clean_subnet(subnet_client, subnet['subnet']['id'])
|
self.clean_subnet(subnet_client, subnet['subnet']['id'])
|
||||||
except lib_exc.ServerFault:
|
except lib_exc.ServerFault:
|
||||||
pass
|
pass
|
||||||
LOG.info(_LI("Failed to allocate subnet: Insufficient "
|
LOG.info("Failed to allocate subnet: Insufficient "
|
||||||
"prefix space to allocate subnet size"))
|
"prefix space to allocate subnet size")
|
||||||
|
@ -28,7 +28,6 @@ from tempest.lib import decorators
|
|||||||
from tempest.lib import exceptions
|
from tempest.lib import exceptions
|
||||||
from tempest import test
|
from tempest import test
|
||||||
|
|
||||||
from vmware_nsx_tempest._i18n import _LI
|
|
||||||
from vmware_nsx_tempest.services import nsxv_client
|
from vmware_nsx_tempest.services import nsxv_client
|
||||||
from vmware_nsx_tempest.tests.nsxv.scenario import (
|
from vmware_nsx_tempest.tests.nsxv.scenario import (
|
||||||
manager_topo_deployment as dmgr)
|
manager_topo_deployment as dmgr)
|
||||||
@ -125,14 +124,14 @@ class TestDHCP121BasicOps(dmgr.TopoDeployScenarioManager):
|
|||||||
cmd = ('/sbin/route -n')
|
cmd = ('/sbin/route -n')
|
||||||
out_data = client.exec_command(cmd)
|
out_data = client.exec_command(cmd)
|
||||||
self.assertIn(Metadataserver_ip, out_data)
|
self.assertIn(Metadataserver_ip, out_data)
|
||||||
LOG.info(_LI("Metadata routes available on vm"))
|
LOG.info("Metadata routes available on vm")
|
||||||
cmd = ('wget http://169.254.169.254 -O sample.txt')
|
cmd = ('wget http://169.254.169.254 -O sample.txt')
|
||||||
client.exec_command(cmd)
|
client.exec_command(cmd)
|
||||||
cmd = ('cat sample.txt')
|
cmd = ('cat sample.txt')
|
||||||
out_data = client.exec_command(cmd)
|
out_data = client.exec_command(cmd)
|
||||||
# Check metadata server inforamtion available or not
|
# Check metadata server inforamtion available or not
|
||||||
self.assertIn('latest', out_data)
|
self.assertIn('latest', out_data)
|
||||||
LOG.info(_LI("metadata server is acessible"))
|
LOG.info("metadata server is acessible")
|
||||||
# Fetch dhcp edge infor from nsx-v
|
# Fetch dhcp edge infor from nsx-v
|
||||||
exc_edge = self.vsm.get_dhcp_edge_info()
|
exc_edge = self.vsm.get_dhcp_edge_info()
|
||||||
self.assertIsNotNone(exc_edge)
|
self.assertIsNotNone(exc_edge)
|
||||||
@ -186,7 +185,7 @@ class TestDHCP121BasicOps(dmgr.TopoDeployScenarioManager):
|
|||||||
self.assertIn(
|
self.assertIn(
|
||||||
_subnet_data['new_host_routes'][0]['nexthop'], out_data)
|
_subnet_data['new_host_routes'][0]['nexthop'], out_data)
|
||||||
self.assertIn(self.nexthop_host_route, out_data)
|
self.assertIn(self.nexthop_host_route, out_data)
|
||||||
LOG.info(_LI("Host routes available on vm"))
|
LOG.info("Host routes available on vm")
|
||||||
# Check Host route info at beckend
|
# Check Host route info at beckend
|
||||||
exc_edge = self.vsm.get_dhcp_edge_info()
|
exc_edge = self.vsm.get_dhcp_edge_info()
|
||||||
self.assertIsNotNone(exc_edge)
|
self.assertIsNotNone(exc_edge)
|
||||||
@ -200,7 +199,7 @@ class TestDHCP121BasicOps(dmgr.TopoDeployScenarioManager):
|
|||||||
dest_subnet = destination_net['destinationSubnet']
|
dest_subnet = destination_net['destinationSubnet']
|
||||||
dest_router = destination_net['router']
|
dest_router = destination_net['router']
|
||||||
if (dest in dest_subnet and self.nexthop1 in dest_router):
|
if (dest in dest_subnet and self.nexthop1 in dest_router):
|
||||||
LOG.info(_LI("Host routes available on nsxv"))
|
LOG.info("Host routes available on nsxv")
|
||||||
# Update subnet with no host-routes
|
# Update subnet with no host-routes
|
||||||
_subnet_data1 = {'new_host_routes': []}
|
_subnet_data1 = {'new_host_routes': []}
|
||||||
new_host_routes = _subnet_data1['new_host_routes']
|
new_host_routes = _subnet_data1['new_host_routes']
|
||||||
@@ -227,7 +226,7 @@ class TestDHCP121BasicOps(dmgr.TopoDeployScenarioManager):
|
|||||||
for destination_net in dhcp_options_info:
|
for destination_net in dhcp_options_info:
|
||||||
if (_subnet_data['new_host_routes'][0]['destination']
|
if (_subnet_data['new_host_routes'][0]['destination']
|
||||||
not in destination_net['destinationSubnet']):
|
not in destination_net['destinationSubnet']):
|
||||||
LOG.info(_LI("Host routes not available on nsxv"))
|
LOG.info("Host routes not available on nsxv")
|
||||||
project_dict = dict(security_group=vm_env['security_group'],
|
project_dict = dict(security_group=vm_env['security_group'],
|
||||||
network=vm_env['network'], subnet=vm_env['subnet'],
|
network=vm_env['network'], subnet=vm_env['subnet'],
|
||||||
router=vm_env['router'],
|
router=vm_env['router'],
|
||||||
@@ -297,30 +296,30 @@ class TestDhcpMetadata(TestDHCP121BasicOps):
|
|||||||
@test.attr(type='nsxv')
|
@test.attr(type='nsxv')
|
||||||
@decorators.idempotent_id('95d06aba-895f-47f8-b47d-ae48c6853a85')
|
@decorators.idempotent_id('95d06aba-895f-47f8-b47d-ae48c6853a85')
|
||||||
def test_dhcp_121_metadata_check_on_vm_nsxv(self):
|
def test_dhcp_121_metadata_check_on_vm_nsxv(self):
|
||||||
LOG.info(_LI("Testcase DHCP-121 option metadata check on vm and \
|
LOG.info("Testcase DHCP-121 option metadata check on vm and "
|
||||||
on nsx deploying"))
|
"on nsx deploying")
|
||||||
self.vm_env = self.setup_vm_enviornment(self.manager, 'green', True)
|
self.vm_env = self.setup_vm_enviornment(self.manager, 'green', True)
|
||||||
self.green = self.dhcp_121_metadata_hostroutes_check_on_vm_nsxv(
|
self.green = self.dhcp_121_metadata_hostroutes_check_on_vm_nsxv(
|
||||||
self.vm_env)
|
self.vm_env)
|
||||||
LOG.info(_LI("Testcase DHCP-121 option metadata check on vm and on \
|
LOG.info("Testcase DHCP-121 option metadata check on vm and on "
|
||||||
nsx completed"))
|
"nsx completed")
|
||||||
|
|
||||||
|
|
||||||
class TestDhcpHostroutesClear(TestDHCP121BasicOps):
|
class TestDhcpHostroutesClear(TestDHCP121BasicOps):
|
||||||
@test.attr(type='nsxv')
|
@test.attr(type='nsxv')
|
||||||
@decorators.idempotent_id('6bec6eb4-8632-493d-a895-a3ee87cb3002')
|
@decorators.idempotent_id('6bec6eb4-8632-493d-a895-a3ee87cb3002')
|
||||||
def test_dhcp_121_hostroutes_clear(self):
|
def test_dhcp_121_hostroutes_clear(self):
|
||||||
LOG.info(_LI("Testcase DHCP-121 option host routes clear deploying"))
|
LOG.info("Testcase DHCP-121 option host routes clear deploying")
|
||||||
self.vm_env = self.setup_vm_enviornment(self.manager, 'green', True)
|
self.vm_env = self.setup_vm_enviornment(self.manager, 'green', True)
|
||||||
self.green = self.dhcp_121_hostroutes_clear(self.vm_env)
|
self.green = self.dhcp_121_hostroutes_clear(self.vm_env)
|
||||||
LOG.info(_LI("Testcase DHCP-121 option host routes clear completed"))
|
LOG.info("Testcase DHCP-121 option host routes clear completed")
|
||||||
|
|
||||||
|
|
||||||
class TestDhcpNegative(TestDHCP121BasicOps):
|
class TestDhcpNegative(TestDHCP121BasicOps):
|
||||||
@test.attr(type='nsxv')
|
@test.attr(type='nsxv')
|
||||||
@decorators.idempotent_id('a58dc6c5-9f28-4184-baf7-37ded52593c4')
|
@decorators.idempotent_id('a58dc6c5-9f28-4184-baf7-37ded52593c4')
|
||||||
def test_dhcp121_negative_test(self):
|
def test_dhcp121_negative_test(self):
|
||||||
LOG.info(_LI("Testcase DHCP-121 option negative test deploying"))
|
LOG.info("Testcase DHCP-121 option negative test deploying")
|
||||||
t_net_id, t_network, t_subnet =\
|
t_net_id, t_network, t_subnet =\
|
||||||
self.create_project_network_subnet('admin')
|
self.create_project_network_subnet('admin')
|
||||||
subnet_id = t_subnet['id']
|
subnet_id = t_subnet['id']
|
||||||
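The hunks above also replace backslash continuations inside string literals with implicit concatenation of adjacent literals. The distinction matters for the logged text, as this small sketch shows (the message is taken from the hunk above; the variable names are illustrative):

# A backslash continuation keeps the next line's indentation inside the string:
old_msg = "Testcase DHCP-121 option metadata check on vm and \
    on nsx deploying"
# Adjacent literals concatenate at compile time, so the message stays clean:
new_msg = ("Testcase DHCP-121 option metadata check on vm and "
           "on nsx deploying")

assert "  on nsx" in old_msg               # indentation leaked into the message
assert "and on nsx deploying" in new_msg   # clean single spacing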
@@ -349,9 +348,9 @@ class TestDhcpNegative(TestDHCP121BasicOps):
|
|||||||
except exceptions.BadRequest:
|
except exceptions.BadRequest:
|
||||||
e = sys.exc_info()[0].__dict__['message']
|
e = sys.exc_info()[0].__dict__['message']
|
||||||
if (e == "Bad request"):
|
if (e == "Bad request"):
|
||||||
LOG.info(_LI("Invalid input for operation:\
|
LOG.info("Invalid input for operation: "
|
||||||
Host routes can only be supported when\
|
"Host routes can only be supported when "
|
||||||
DHCP is enabled"))
|
"DHCP is enabled")
|
||||||
pass
|
pass
|
||||||
subnet_id = t_subnet['id']
|
subnet_id = t_subnet['id']
|
||||||
kwargs = {'enable_dhcp': 'true'}
|
kwargs = {'enable_dhcp': 'true'}
|
||||||
@@ -379,16 +378,16 @@ class TestDhcpNegative(TestDHCP121BasicOps):
|
|||||||
except exceptions.BadRequest:
|
except exceptions.BadRequest:
|
||||||
e = sys.exc_info()[0].__dict__['message']
|
e = sys.exc_info()[0].__dict__['message']
|
||||||
if (e == "Bad request"):
|
if (e == "Bad request"):
|
||||||
LOG.info(_LI("Can't disable DHCP while using host routes"))
|
LOG.info("Can't disable DHCP while using host routes")
|
||||||
pass
|
pass
|
||||||
LOG.info(_LI("Testcase DHCP-121 option negative test completed"))
|
LOG.info("Testcase DHCP-121 option negative test completed")
|
||||||
|
|
||||||
|
|
||||||
class TestDhcpMultiHostRoute(TestDHCP121BasicOps):
|
class TestDhcpMultiHostRoute(TestDHCP121BasicOps):
|
||||||
@test.attr(type='nsxv')
|
@test.attr(type='nsxv')
|
||||||
@decorators.idempotent_id('c3ca96d7-b704-4d94-b42d-e7bae94b82cd')
|
@decorators.idempotent_id('c3ca96d7-b704-4d94-b42d-e7bae94b82cd')
|
||||||
def test_dhcp121_multi_host_route(self):
|
def test_dhcp121_multi_host_route(self):
|
||||||
LOG.info(_LI("Testcase DHCP-121 option multi host routes deploying"))
|
LOG.info("Testcase DHCP-121 option multi host routes deploying")
|
||||||
t_net_id, t_network, t_subnet =\
|
t_net_id, t_network, t_subnet =\
|
||||||
self.create_project_network_subnet('admin')
|
self.create_project_network_subnet('admin')
|
||||||
# Fetch next hop information from tempest.conf
|
# Fetch next hop information from tempest.conf
|
||||||
@@ -448,8 +447,8 @@ class TestDhcpMultiHostRoute(TestDHCP121BasicOps):
|
|||||||
subnet host_routes equal to 19 or not
|
subnet host_routes equal to 19 or not
|
||||||
'''
|
'''
|
||||||
if (len(subnet['subnet']['host_routes']) == 19):
|
if (len(subnet['subnet']['host_routes']) == 19):
|
||||||
LOG.info(_LI("Multiple entries for host routes available"))
|
LOG.info("Multiple entries for host routes available")
|
||||||
LOG.info(_LI("Testcase DHCP-121 option multi host routes completed"))
|
LOG.info("Testcase DHCP-121 option multi host routes completed")
|
||||||
|
|
||||||
|
|
||||||
class TestDhcpHostRoutesBetweenVms(TestDHCP121BasicOps):
|
class TestDhcpHostRoutesBetweenVms(TestDHCP121BasicOps):
|
||||||
|
@@ -23,7 +23,6 @@ from tempest.lib.common.utils import test_utils
|
|||||||
from tempest.lib import decorators
|
from tempest.lib import decorators
|
||||||
from tempest import test
|
from tempest import test
|
||||||
|
|
||||||
from vmware_nsx_tempest._i18n import _LI
|
|
||||||
from vmware_nsx_tempest.services import nsxv_client
|
from vmware_nsx_tempest.services import nsxv_client
|
||||||
from vmware_nsx_tempest.tests.nsxv.scenario import (
|
from vmware_nsx_tempest.tests.nsxv.scenario import (
|
||||||
manager_topo_deployment as dmgr)
|
manager_topo_deployment as dmgr)
|
||||||
@@ -195,7 +194,7 @@ class TestSpoofGuardFeature(TestSpoofGuardBasicOps):
|
|||||||
exclude_list = [item.encode('utf-8') for item in items]
|
exclude_list = [item.encode('utf-8') for item in items]
|
||||||
for exclude_vm in exclude_list:
|
for exclude_vm in exclude_list:
|
||||||
if vm_id in exclude_vm:
|
if vm_id in exclude_vm:
|
||||||
LOG.info(_LI("Vm in exclude list"))
|
LOG.info("Vm in exclude list")
|
||||||
# Update Port security to disabled
|
# Update Port security to disabled
|
||||||
port_client.update_port(
|
port_client.update_port(
|
||||||
port_id=port_id,
|
port_id=port_id,
|
||||||
@@ -204,7 +203,7 @@ class TestSpoofGuardFeature(TestSpoofGuardBasicOps):
|
|||||||
exclude_list = [item.encode('utf-8') for item in items]
|
exclude_list = [item.encode('utf-8') for item in items]
|
||||||
if exclude_vm in exclude_list:
|
if exclude_vm in exclude_list:
|
||||||
if vm_id not in exclude_vm:
|
if vm_id not in exclude_vm:
|
||||||
LOG.info(_LI("Vm not in exclude list"))
|
LOG.info("Vm not in exclude list")
|
||||||
# Detach interface from vm
|
# Detach interface from vm
|
||||||
self.interface_client.delete_interface(vm_id, port_id)
|
self.interface_client.delete_interface(vm_id, port_id)
|
||||||
|
|
||||||
@@ -315,7 +314,7 @@ class TestSpoofGuardFeature(TestSpoofGuardBasicOps):
|
|||||||
exclude_list = [item.encode('utf-8') for item in items]
|
exclude_list = [item.encode('utf-8') for item in items]
|
||||||
for exclude_vm in exclude_list:
|
for exclude_vm in exclude_list:
|
||||||
if vm_id in exclude_vm:
|
if vm_id in exclude_vm:
|
||||||
LOG.info(_LI("Vm1 in exclude list"))
|
LOG.info("Vm1 in exclude list")
|
||||||
vm2_id = t_serv2['id']
|
vm2_id = t_serv2['id']
|
||||||
# Update vm2 port to disable port security
|
# Update vm2 port to disable port security
|
||||||
port_client.update_port(
|
port_client.update_port(
|
||||||
@@ -326,7 +325,7 @@ class TestSpoofGuardFeature(TestSpoofGuardBasicOps):
|
|||||||
# Check vm2 in exclude list or not
|
# Check vm2 in exclude list or not
|
||||||
for exclude_vm in exclude_list:
|
for exclude_vm in exclude_list:
|
||||||
if vm2_id in exclude_vm:
|
if vm2_id in exclude_vm:
|
||||||
LOG.info(_LI("Vm2 in exclude list"))
|
LOG.info("Vm2 in exclude list")
|
||||||
vm3_id = t_serv3['id']
|
vm3_id = t_serv3['id']
|
||||||
# Update vm3 port to enable port security
|
# Update vm3 port to enable port security
|
||||||
port_client.update_port(
|
port_client.update_port(
|
||||||
@@ -337,7 +336,7 @@ class TestSpoofGuardFeature(TestSpoofGuardBasicOps):
|
|||||||
# Check vm3 in exclude list or not
|
# Check vm3 in exclude list or not
|
||||||
for exclude_vm in exclude_list:
|
for exclude_vm in exclude_list:
|
||||||
if vm3_id in exclude_vm:
|
if vm3_id in exclude_vm:
|
||||||
LOG.info(_LI("Vm3 in exclude list"))
|
LOG.info("Vm3 in exclude list")
|
||||||
# Update vm1 port to enable port security
|
# Update vm1 port to enable port security
|
||||||
port_client.update_port(
|
port_client.update_port(
|
||||||
port_id=port1_id,
|
port_id=port1_id,
|
||||||
@@ -347,7 +346,7 @@ class TestSpoofGuardFeature(TestSpoofGuardBasicOps):
|
|||||||
# Check vm should not be in exclude list
|
# Check vm should not be in exclude list
|
||||||
for exclude_vm in exclude_list:
|
for exclude_vm in exclude_list:
|
||||||
if vm_id not in exclude_vm:
|
if vm_id not in exclude_vm:
|
||||||
LOG.info(_LI("Vm1 not in exclude list"))
|
LOG.info("Vm1 not in exclude list")
|
||||||
|
|
||||||
@test.attr(type='nsxv')
|
@test.attr(type='nsxv')
|
||||||
@decorators.idempotent_id('f034d3e9-d717-4bcd-8e6e-18e9ada7b81a')
|
@decorators.idempotent_id('f034d3e9-d717-4bcd-8e6e-18e9ada7b81a')
|
||||||
@@ -365,7 +364,7 @@ class TestSpoofGuardFeature(TestSpoofGuardBasicOps):
|
|||||||
exclude_list = [item.encode('utf-8') for item in items]
|
exclude_list = [item.encode('utf-8') for item in items]
|
||||||
for exclude_vm in exclude_list:
|
for exclude_vm in exclude_list:
|
||||||
if vm_id in exclude_vm:
|
if vm_id in exclude_vm:
|
||||||
LOG.info(_LI("Vm in exclude list"))
|
LOG.info("Vm in exclude list")
|
||||||
port_client.update_port(
|
port_client.update_port(
|
||||||
port_id=port_id,
|
port_id=port_id,
|
||||||
port_security_enabled='true')
|
port_security_enabled='true')
|
||||||
@@ -373,7 +372,7 @@ class TestSpoofGuardFeature(TestSpoofGuardBasicOps):
|
|||||||
exclude_list = [item.encode('utf-8') for item in items]
|
exclude_list = [item.encode('utf-8') for item in items]
|
||||||
if exclude_vm in exclude_list:
|
if exclude_vm in exclude_list:
|
||||||
if vm_id not in exclude_vm:
|
if vm_id not in exclude_vm:
|
||||||
LOG.info(_LI("Vm not in exclude list"))
|
LOG.info("Vm not in exclude list")
|
||||||
self.interface_client.delete_interface(vm_id, port_id)
|
self.interface_client.delete_interface(vm_id, port_id)
|
||||||
|
|
||||||
@test.attr(type='nsxv')
|
@test.attr(type='nsxv')
|
||||||
@@ -394,7 +393,7 @@ class TestSpoofGuardFeature(TestSpoofGuardBasicOps):
|
|||||||
# Check port security of created port
|
# Check port security of created port
|
||||||
port_details = port_client.show_port(port_id=port_id)
|
port_details = port_client.show_port(port_id=port_id)
|
||||||
if (port_details['port']['port_security_enabled'] == 'false'):
|
if (port_details['port']['port_security_enabled'] == 'false'):
|
||||||
LOG.info(_LI("Port security of port is disabled"))
|
LOG.info("Port security of port is disabled")
|
||||||
kwargs = {'port_security_enabled': 'true'}
|
kwargs = {'port_security_enabled': 'true'}
|
||||||
# Update port security of network to enabled
|
# Update port security of network to enabled
|
||||||
network_client.update_network(network_id=net_id, **kwargs)
|
network_client.update_network(network_id=net_id, **kwargs)
|
||||||
@@ -404,7 +403,7 @@ class TestSpoofGuardFeature(TestSpoofGuardBasicOps):
|
|||||||
port_id = port['id']
|
port_id = port['id']
|
||||||
port_details = port_client.show_port(port_id=port_id)
|
port_details = port_client.show_port(port_id=port_id)
|
||||||
if (port_details['port']['port_security_enabled'] == 'true'):
|
if (port_details['port']['port_security_enabled'] == 'true'):
|
||||||
LOG.info(_LI("Port security of port is enabled"))
|
LOG.info("Port security of port is enabled")
|
||||||
|
|
||||||
@test.attr(type='nsxv')
|
@test.attr(type='nsxv')
|
||||||
@decorators.idempotent_id('c8683cb7-4be5-4670-95c6-344a0aea3667')
|
@decorators.idempotent_id('c8683cb7-4be5-4670-95c6-344a0aea3667')
|
||||||
@@ -425,7 +424,7 @@ class TestSpoofGuardFeature(TestSpoofGuardBasicOps):
|
|||||||
exclude_list = [item.encode('utf-8') for item in items]
|
exclude_list = [item.encode('utf-8') for item in items]
|
||||||
for exclude_vm in exclude_list:
|
for exclude_vm in exclude_list:
|
||||||
if vm_id in exclude_vm:
|
if vm_id in exclude_vm:
|
||||||
LOG.info(_LI("Vm in exclude list"))
|
LOG.info("Vm in exclude list")
|
||||||
name = 'disabled-port-security-port2'
|
name = 'disabled-port-security-port2'
|
||||||
kwargs = {'name': name, 'network_id': net_id,
|
kwargs = {'name': name, 'network_id': net_id,
|
||||||
'port_security_enabled': 'false'}
|
'port_security_enabled': 'false'}
|
||||||
@@ -436,7 +435,7 @@ class TestSpoofGuardFeature(TestSpoofGuardBasicOps):
|
|||||||
exclude_list = [item.encode('utf-8') for item in items]
|
exclude_list = [item.encode('utf-8') for item in items]
|
||||||
for exclude_vm in exclude_list:
|
for exclude_vm in exclude_list:
|
||||||
if vm_id in exclude_vm:
|
if vm_id in exclude_vm:
|
||||||
LOG.info(_LI("Vm in exclude list"))
|
LOG.info("Vm in exclude list")
|
||||||
port_client.update_port(
|
port_client.update_port(
|
||||||
port_id=port2_id,
|
port_id=port2_id,
|
||||||
port_security_enabled='true')
|
port_security_enabled='true')
|
||||||
@@ -444,7 +443,7 @@ class TestSpoofGuardFeature(TestSpoofGuardBasicOps):
|
|||||||
exclude_list = [item.encode('utf-8') for item in items]
|
exclude_list = [item.encode('utf-8') for item in items]
|
||||||
for exclude_vm in exclude_list:
|
for exclude_vm in exclude_list:
|
||||||
if vm_id in exclude_vm:
|
if vm_id in exclude_vm:
|
||||||
LOG.info(_LI("Vm in exclude list"))
|
LOG.info("Vm in exclude list")
|
||||||
port_client.update_port(
|
port_client.update_port(
|
||||||
port_id=port1_id,
|
port_id=port1_id,
|
||||||
port_security_enabled='true')
|
port_security_enabled='true')
|
||||||
@@ -452,6 +451,6 @@ class TestSpoofGuardFeature(TestSpoofGuardBasicOps):
|
|||||||
exclude_list = [item.encode('utf-8') for item in items]
|
exclude_list = [item.encode('utf-8') for item in items]
|
||||||
if exclude_vm in exclude_list:
|
if exclude_vm in exclude_list:
|
||||||
if vm_id not in exclude_vm:
|
if vm_id not in exclude_vm:
|
||||||
LOG.info(_LI("Vm not in exclude list"))
|
LOG.info("Vm not in exclude list")
|
||||||
self.interface_client.delete_interface(vm_id, port1_id)
|
self.interface_client.delete_interface(vm_id, port1_id)
|
||||||
self.interface_client.delete_interface(vm_id, port2_id)
|
self.interface_client.delete_interface(vm_id, port2_id)
|
||||||
|
@@ -20,7 +20,6 @@ from tempest.lib.common.utils import data_utils
|
|||||||
from tempest.lib import decorators
|
from tempest.lib import decorators
|
||||||
from tempest import test
|
from tempest import test
|
||||||
|
|
||||||
from vmware_nsx_tempest._i18n import _LI
|
|
||||||
from vmware_nsx_tempest.common import constants
|
from vmware_nsx_tempest.common import constants
|
||||||
from vmware_nsx_tempest.services import base_l2gw
|
from vmware_nsx_tempest.services import base_l2gw
|
||||||
|
|
||||||
@@ -42,7 +41,7 @@ class L2GatewayTest(base_l2gw.BaseL2GatewayTest):
|
|||||||
To create l2gw we need bridge cluster name (interface name) and
|
To create l2gw we need bridge cluster name (interface name) and
|
||||||
bridge cluster UUID (device name) from NSX manager.
|
bridge cluster UUID (device name) from NSX manager.
|
||||||
"""
|
"""
|
||||||
LOG.info(_LI("Testing l2_gateway_create api"))
|
LOG.info("Testing l2_gateway_create api")
|
||||||
cluster_info = self.nsx_bridge_cluster_info()
|
cluster_info = self.nsx_bridge_cluster_info()
|
||||||
device_name, interface_name = cluster_info[0][0], cluster_info[0][1]
|
device_name, interface_name = cluster_info[0][0], cluster_info[0][1]
|
||||||
l2gw_name = data_utils.rand_name(constants.L2GW)
|
l2gw_name = data_utils.rand_name(constants.L2GW)
|
||||||
@@ -67,7 +66,7 @@ class L2GatewayTest(base_l2gw.BaseL2GatewayTest):
|
|||||||
To create l2gw we need bridge cluster name (interface name) and
|
To create l2gw we need bridge cluster name (interface name) and
|
||||||
bridge cluster UUID (device name) from NSX manager and vlan id.
|
bridge cluster UUID (device name) from NSX manager and vlan id.
|
||||||
"""
|
"""
|
||||||
LOG.info(_LI("Testing l2_gateway_create api with segmentation ID"))
|
LOG.info("Testing l2_gateway_create api with segmentation ID")
|
||||||
cluster_info = self.nsx_bridge_cluster_info()
|
cluster_info = self.nsx_bridge_cluster_info()
|
||||||
device_name, interface_name = cluster_info[0][0], cluster_info[0][1]
|
device_name, interface_name = cluster_info[0][0], cluster_info[0][1]
|
||||||
l2gw_name = data_utils.rand_name(constants.L2GW)
|
l2gw_name = data_utils.rand_name(constants.L2GW)
|
||||||
@@ -104,7 +103,7 @@ class L2GatewayTest(base_l2gw.BaseL2GatewayTest):
|
|||||||
To create l2gw we need bridge cluster name (interface name) and
|
To create l2gw we need bridge cluster name (interface name) and
|
||||||
bridge cluster UUID (device name) from NSX manager and vlan id.
|
bridge cluster UUID (device name) from NSX manager and vlan id.
|
||||||
"""
|
"""
|
||||||
LOG.info(_LI("Testing l2_gateway_create api with segmentation ID"))
|
LOG.info("Testing l2_gateway_create api with segmentation ID")
|
||||||
cluster_info = self.nsx_bridge_cluster_info()
|
cluster_info = self.nsx_bridge_cluster_info()
|
||||||
device_name, interface_name = cluster_info[0][0], cluster_info[0][1]
|
device_name, interface_name = cluster_info[0][0], cluster_info[0][1]
|
||||||
l2gw_name = data_utils.rand_name(constants.L2GW)
|
l2gw_name = data_utils.rand_name(constants.L2GW)
|
||||||
@@ -140,7 +139,7 @@ class L2GatewayTest(base_l2gw.BaseL2GatewayTest):
|
|||||||
Delete l2gw will create l2gw and delete recently created l2gw. To
|
Delete l2gw will create l2gw and delete recently created l2gw. To
|
||||||
delete l2gw we need l2gw id.
|
delete l2gw we need l2gw id.
|
||||||
"""
|
"""
|
||||||
LOG.info(_LI("Testing l2_gateway_delete api"))
|
LOG.info("Testing l2_gateway_delete api")
|
||||||
cluster_info = self.nsx_bridge_cluster_info()
|
cluster_info = self.nsx_bridge_cluster_info()
|
||||||
device_name, interface_name = cluster_info[0][0], cluster_info[0][1]
|
device_name, interface_name = cluster_info[0][0], cluster_info[0][1]
|
||||||
l2gw_name = data_utils.rand_name(constants.L2GW)
|
l2gw_name = data_utils.rand_name(constants.L2GW)
|
||||||
@@ -171,7 +170,7 @@ class L2GatewayTest(base_l2gw.BaseL2GatewayTest):
|
|||||||
Update l2gw will update info in already created l2gw. To
|
Update l2gw will update info in already created l2gw. To
|
||||||
update l2gw we need l2gw id and payload to update.
|
update l2gw we need l2gw id and payload to update.
|
||||||
"""
|
"""
|
||||||
LOG.info(_LI("Testing l2_gateway_update api"))
|
LOG.info("Testing l2_gateway_update api")
|
||||||
cluster_info = self.nsx_bridge_cluster_info()
|
cluster_info = self.nsx_bridge_cluster_info()
|
||||||
device_name, interface_name = cluster_info[0][0], cluster_info[0][1]
|
device_name, interface_name = cluster_info[0][0], cluster_info[0][1]
|
||||||
l2gw_name = data_utils.rand_name(constants.L2GW)
|
l2gw_name = data_utils.rand_name(constants.L2GW)
|
||||||
@@ -198,7 +197,7 @@ class L2GatewayTest(base_l2gw.BaseL2GatewayTest):
|
|||||||
"code":
|
"code":
|
||||||
constants.EXPECTED_HTTP_RESPONSE_200})
|
constants.EXPECTED_HTTP_RESPONSE_200})
|
||||||
rsp_l2gw = update_rsp[constants.L2GW]
|
rsp_l2gw = update_rsp[constants.L2GW]
|
||||||
LOG.info(_LI("response : %(rsp_l2gw)s") % {"rsp_l2gw": rsp_l2gw})
|
LOG.info("response : %(rsp_l2gw)s", {"rsp_l2gw": rsp_l2gw})
|
||||||
# Assert if name is not updated.
|
# Assert if name is not updated.
|
||||||
self.assertEqual(l2gw_new_name, rsp_l2gw["name"],
|
self.assertEqual(l2gw_new_name, rsp_l2gw["name"],
|
||||||
"l2gw name=%(rsp_name)s is not the same as "
|
"l2gw name=%(rsp_name)s is not the same as "
|
||||||
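Besides dropping _LI, the hunk above switches this log call from eager %-interpolation to the logging framework's deferred interpolation. A hedged sketch of both forms, with an illustrative payload (the dict contents are invented for the example):

from oslo_log import log as logging

LOG = logging.getLogger(__name__)
rsp_l2gw = {"id": "some-uuid", "name": "l2gw-updated"}  # illustrative value

# Eager (old form): the string was rendered even when INFO is filtered out.
#     LOG.info(_LI("response : %(rsp_l2gw)s") % {"rsp_l2gw": rsp_l2gw})

# Deferred (new form): the logger interpolates only if the record is
# actually emitted, and aggregators can group on the template string.
LOG.info("response : %(rsp_l2gw)s", {"rsp_l2gw": rsp_l2gw})
LOG.info("l2gw connection list response: %s", rsp_l2gw)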
@@ -213,7 +212,7 @@ class L2GatewayTest(base_l2gw.BaseL2GatewayTest):
|
|||||||
Update l2gw will update info in already created l2gw. To
|
Update l2gw will update info in already created l2gw. To
|
||||||
update l2gw we need l2gw id and payload to update.
|
update l2gw we need l2gw id and payload to update.
|
||||||
"""
|
"""
|
||||||
LOG.info(_LI("Testing l2_gateway_update api"))
|
LOG.info("Testing l2_gateway_update api")
|
||||||
cluster_info = self.nsx_bridge_cluster_info()
|
cluster_info = self.nsx_bridge_cluster_info()
|
||||||
device_name, interface_name = cluster_info[0][0], cluster_info[0][1]
|
device_name, interface_name = cluster_info[0][0], cluster_info[0][1]
|
||||||
l2gw_name = data_utils.rand_name(constants.L2GW)
|
l2gw_name = data_utils.rand_name(constants.L2GW)
|
||||||
@@ -243,7 +242,7 @@ class L2GatewayTest(base_l2gw.BaseL2GatewayTest):
|
|||||||
constants.EXPECTED_HTTP_RESPONSE_200})
|
constants.EXPECTED_HTTP_RESPONSE_200})
|
||||||
rsp_l2gw = update_rsp[constants.L2GW]
|
rsp_l2gw = update_rsp[constants.L2GW]
|
||||||
self.l2gw_created[rsp_l2gw["id"]] = rsp_l2gw
|
self.l2gw_created[rsp_l2gw["id"]] = rsp_l2gw
|
||||||
LOG.info(_LI("response : %(rsp_l2gw)s") % {"rsp_l2gw": rsp_l2gw})
|
LOG.info("response : %(rsp_l2gw)s", {"rsp_l2gw": rsp_l2gw})
|
||||||
if "segmentation_id" in devices["devices"][0]["interfaces"][0]:
|
if "segmentation_id" in devices["devices"][0]["interfaces"][0]:
|
||||||
self.assertEqual(devices["devices"][0]["interfaces"][0][
|
self.assertEqual(devices["devices"][0]["interfaces"][0][
|
||||||
"segmentation_id"][0],
|
"segmentation_id"][0],
|
||||||
@@ -258,7 +257,7 @@ class L2GatewayTest(base_l2gw.BaseL2GatewayTest):
|
|||||||
"""
|
"""
|
||||||
show l2gw based on UUID. To see l2gw info we need l2gw id.
|
show l2gw based on UUID. To see l2gw info we need l2gw id.
|
||||||
"""
|
"""
|
||||||
LOG.info(_LI("Testing l2_gateway_show api"))
|
LOG.info("Testing l2_gateway_show api")
|
||||||
cluster_info = self.nsx_bridge_cluster_info()
|
cluster_info = self.nsx_bridge_cluster_info()
|
||||||
device_name, interface_name = cluster_info[0][0], cluster_info[0][1]
|
device_name, interface_name = cluster_info[0][0], cluster_info[0][1]
|
||||||
l2gw_name = data_utils.rand_name(constants.L2GW)
|
l2gw_name = data_utils.rand_name(constants.L2GW)
|
||||||
@@ -301,7 +300,7 @@ class L2GatewayTest(base_l2gw.BaseL2GatewayTest):
|
|||||||
"""
|
"""
|
||||||
list created l2gw.
|
list created l2gw.
|
||||||
"""
|
"""
|
||||||
LOG.info(_LI("Testing l2_gateway_list api"))
|
LOG.info("Testing l2_gateway_list api")
|
||||||
cluster_info = self.nsx_bridge_cluster_info()
|
cluster_info = self.nsx_bridge_cluster_info()
|
||||||
device_name, interface_name = cluster_info[0][0], cluster_info[0][1]
|
device_name, interface_name = cluster_info[0][0], cluster_info[0][1]
|
||||||
l2gw_name = data_utils.rand_name(constants.L2GW)
|
l2gw_name = data_utils.rand_name(constants.L2GW)
|
||||||
|
@@ -22,7 +22,6 @@ from tempest.lib.common.utils import test_utils
|
|||||||
from tempest.lib import decorators
|
from tempest.lib import decorators
|
||||||
from tempest import test
|
from tempest import test
|
||||||
|
|
||||||
from vmware_nsx_tempest._i18n import _LI
|
|
||||||
from vmware_nsx_tempest.common import constants
|
from vmware_nsx_tempest.common import constants
|
||||||
from vmware_nsx_tempest.services import base_l2gw
|
from vmware_nsx_tempest.services import base_l2gw
|
||||||
|
|
||||||
@@ -83,7 +82,7 @@ class L2GatewayConnectionTest(base_l2gw.BaseL2GatewayTest):
|
|||||||
Create l2 gateway connection using one vlan. Vlan parameter is
|
Create l2 gateway connection using one vlan. Vlan parameter is
|
||||||
passed into L2GW create.
|
passed into L2GW create.
|
||||||
"""
|
"""
|
||||||
LOG.info(_LI("Testing test_l2_gateway_connection_create api"))
|
LOG.info("Testing test_l2_gateway_connection_create api")
|
||||||
cluster_info = self.nsx_bridge_cluster_info()
|
cluster_info = self.nsx_bridge_cluster_info()
|
||||||
device_name, interface_name = cluster_info[0][0], cluster_info[0][1]
|
device_name, interface_name = cluster_info[0][0], cluster_info[0][1]
|
||||||
l2gw_name = data_utils.rand_name(constants.L2GW)
|
l2gw_name = data_utils.rand_name(constants.L2GW)
|
||||||
@@ -116,7 +115,7 @@ class L2GatewayConnectionTest(base_l2gw.BaseL2GatewayTest):
|
|||||||
Create l2 gateway connection using multiple vlans. Vlan parameter is
|
Create l2 gateway connection using multiple vlans. Vlan parameter is
|
||||||
passed into L2GW create.
|
passed into L2GW create.
|
||||||
"""
|
"""
|
||||||
LOG.info(_LI("Testing test_l2_gateway_connection_create api"))
|
LOG.info("Testing test_l2_gateway_connection_create api")
|
||||||
cluster_info = self.nsx_bridge_cluster_info()
|
cluster_info = self.nsx_bridge_cluster_info()
|
||||||
device_name, interface_name = cluster_info[0][0], cluster_info[0][1]
|
device_name, interface_name = cluster_info[0][0], cluster_info[0][1]
|
||||||
l2gw_name = data_utils.rand_name(constants.L2GW)
|
l2gw_name = data_utils.rand_name(constants.L2GW)
|
||||||
@@ -149,7 +148,7 @@ class L2GatewayConnectionTest(base_l2gw.BaseL2GatewayTest):
|
|||||||
Create l2 gateway connection using one vlan. Vlan parameter is
|
Create l2 gateway connection using one vlan. Vlan parameter is
|
||||||
passed into L2GW connection create.
|
passed into L2GW connection create.
|
||||||
"""
|
"""
|
||||||
LOG.info(_LI("Testing test_l2_gateway_connection_create api"))
|
LOG.info("Testing test_l2_gateway_connection_create api")
|
||||||
cluster_info = self.nsx_bridge_cluster_info()
|
cluster_info = self.nsx_bridge_cluster_info()
|
||||||
device_name, interface_name = cluster_info[0][0], cluster_info[0][1]
|
device_name, interface_name = cluster_info[0][0], cluster_info[0][1]
|
||||||
l2gw_name = data_utils.rand_name(constants.L2GW)
|
l2gw_name = data_utils.rand_name(constants.L2GW)
|
||||||
@@ -186,7 +185,7 @@ class L2GatewayConnectionTest(base_l2gw.BaseL2GatewayTest):
|
|||||||
Create l2 gateway connection using one vlan and test l2 gateway
|
Create l2 gateway connection using one vlan and test l2 gateway
|
||||||
connection show api
|
connection show api
|
||||||
"""
|
"""
|
||||||
LOG.info(_LI("Testing test_l2_gateway_connection_create api"))
|
LOG.info("Testing test_l2_gateway_connection_create api")
|
||||||
cluster_info = self.nsx_bridge_cluster_info()
|
cluster_info = self.nsx_bridge_cluster_info()
|
||||||
device_name, interface_name = cluster_info[0][0], cluster_info[0][1]
|
device_name, interface_name = cluster_info[0][0], cluster_info[0][1]
|
||||||
l2gw_name = data_utils.rand_name(constants.L2GW)
|
l2gw_name = data_utils.rand_name(constants.L2GW)
|
||||||
@@ -231,7 +230,7 @@ class L2GatewayConnectionTest(base_l2gw.BaseL2GatewayTest):
|
|||||||
Create l2 gateway connection using one vlan and test l2 gateway
|
Create l2 gateway connection using one vlan and test l2 gateway
|
||||||
connection list api.
|
connection list api.
|
||||||
"""
|
"""
|
||||||
LOG.info(_LI("Testing test_l2_gateway_connection_create api"))
|
LOG.info("Testing test_l2_gateway_connection_create api")
|
||||||
cluster_info = self.nsx_bridge_cluster_info()
|
cluster_info = self.nsx_bridge_cluster_info()
|
||||||
device_name, interface_name = cluster_info[0][0], cluster_info[0][1]
|
device_name, interface_name = cluster_info[0][0], cluster_info[0][1]
|
||||||
l2gw_name = data_utils.rand_name(constants.L2GW)
|
l2gw_name = data_utils.rand_name(constants.L2GW)
|
||||||
@@ -250,7 +249,7 @@ class L2GatewayConnectionTest(base_l2gw.BaseL2GatewayTest):
|
|||||||
"Response code is not %(code)s" % {
|
"Response code is not %(code)s" % {
|
||||||
"code": constants.EXPECTED_HTTP_RESPONSE_201})
|
"code": constants.EXPECTED_HTTP_RESPONSE_201})
|
||||||
list_rsp = self.l2gwc_client.list_l2_gateway_connections()
|
list_rsp = self.l2gwc_client.list_l2_gateway_connections()
|
||||||
LOG.info(_LI("l2gw connection list response: %s") % list_rsp)
|
LOG.info("l2gw connection list response: %s", list_rsp)
|
||||||
# Assert in case of failure.
|
# Assert in case of failure.
|
||||||
self.assertEqual(constants.EXPECTED_HTTP_RESPONSE_200,
|
self.assertEqual(constants.EXPECTED_HTTP_RESPONSE_200,
|
||||||
list_rsp.response["status"],
|
list_rsp.response["status"],
|
||||||
@@ -289,10 +288,10 @@ class L2GatewayConnectionTest(base_l2gw.BaseL2GatewayTest):
|
|||||||
- Recreate l2gw connection
|
- Recreate l2gw connection
|
||||||
- verify with l2gw connection list API.
|
- verify with l2gw connection list API.
|
||||||
"""
|
"""
|
||||||
LOG.info(_LI("Testing test_l2_gateway_connection_create api"))
|
LOG.info("Testing test_l2_gateway_connection_create api")
|
||||||
# List all the L2GW connection.
|
# List all the L2GW connection.
|
||||||
list_rsp = self.l2gwc_client.list_l2_gateway_connections()
|
list_rsp = self.l2gwc_client.list_l2_gateway_connections()
|
||||||
LOG.info(_LI("l2gw connection list response: %s") % list_rsp)
|
LOG.info("l2gw connection list response: %s", list_rsp)
|
||||||
# Assert in case of failure.
|
# Assert in case of failure.
|
||||||
self.assertEqual(constants.EXPECTED_HTTP_RESPONSE_200,
|
self.assertEqual(constants.EXPECTED_HTTP_RESPONSE_200,
|
||||||
list_rsp.response["status"],
|
list_rsp.response["status"],
|
||||||
@@ -333,7 +332,7 @@ class L2GatewayConnectionTest(base_l2gw.BaseL2GatewayTest):
|
|||||||
"code": constants.EXPECTED_HTTP_RESPONSE_201})
|
"code": constants.EXPECTED_HTTP_RESPONSE_201})
|
||||||
# List all the L2GW connection.
|
# List all the L2GW connection.
|
||||||
list_rsp = self.l2gwc_client.list_l2_gateway_connections()
|
list_rsp = self.l2gwc_client.list_l2_gateway_connections()
|
||||||
LOG.info(_LI("l2gw connection list response: %s") % list_rsp)
|
LOG.info("l2gw connection list response: %s", list_rsp)
|
||||||
# Assert in case of failure.
|
# Assert in case of failure.
|
||||||
self.assertEqual(constants.EXPECTED_HTTP_RESPONSE_200,
|
self.assertEqual(constants.EXPECTED_HTTP_RESPONSE_200,
|
||||||
list_rsp.response["status"],
|
list_rsp.response["status"],
|
||||||
@@ -353,7 +352,7 @@ class L2GatewayConnectionTest(base_l2gw.BaseL2GatewayTest):
|
|||||||
Delete l2gw will create l2gw and delete recently created l2gw. To
|
Delete l2gw will create l2gw and delete recently created l2gw. To
|
||||||
delete l2gw we need l2gw id.
|
delete l2gw we need l2gw id.
|
||||||
"""
|
"""
|
||||||
LOG.info(_LI("Testing l2_gateway_connection_delete api"))
|
LOG.info("Testing l2_gateway_connection_delete api")
|
||||||
cluster_info = self.nsx_bridge_cluster_info()
|
cluster_info = self.nsx_bridge_cluster_info()
|
||||||
device_name, interface_name = cluster_info[0][0], cluster_info[0][1]
|
device_name, interface_name = cluster_info[0][0], cluster_info[0][1]
|
||||||
l2gw_name = data_utils.rand_name(constants.L2GW)
|
l2gw_name = data_utils.rand_name(constants.L2GW)
|
||||||
|
@@ -24,7 +24,6 @@ from tempest.lib.common.utils import test_utils
|
|||||||
from tempest.lib import decorators
|
from tempest.lib import decorators
|
||||||
from tempest.lib import exceptions as lib_exc
|
from tempest.lib import exceptions as lib_exc
|
||||||
|
|
||||||
from vmware_nsx_tempest._i18n import _LI
|
|
||||||
from vmware_nsx_tempest.common import constants
|
from vmware_nsx_tempest.common import constants
|
||||||
from vmware_nsx_tempest.services import base_l2gw
|
from vmware_nsx_tempest.services import base_l2gw
|
||||||
|
|
||||||
@@ -85,7 +84,7 @@ class L2GatewayConnectionNegative(base_l2gw.BaseL2GatewayTest):
|
|||||||
"""
|
"""
|
||||||
Delete l2 gateway with active mapping.
|
Delete l2 gateway with active mapping.
|
||||||
"""
|
"""
|
||||||
LOG.info(_LI("Testing test_l2_gateway_create api"))
|
LOG.info("Testing test_l2_gateway_create api")
|
||||||
cluster_info = self.nsx_bridge_cluster_info()
|
cluster_info = self.nsx_bridge_cluster_info()
|
||||||
device_name, interface_name = cluster_info[0][0], cluster_info[0][1]
|
device_name, interface_name = cluster_info[0][0], cluster_info[0][1]
|
||||||
l2gw_name = data_utils.rand_name(constants.L2GW)
|
l2gw_name = data_utils.rand_name(constants.L2GW)
|
||||||
@@ -112,7 +111,7 @@ class L2GatewayConnectionNegative(base_l2gw.BaseL2GatewayTest):
|
|||||||
"""
|
"""
|
||||||
Recreate l2 gateway connection using same parameters.
|
Recreate l2 gateway connection using same parameters.
|
||||||
"""
|
"""
|
||||||
LOG.info(_LI("Testing test_l2_gateway_connection_create api"))
|
LOG.info("Testing test_l2_gateway_connection_create api")
|
||||||
cluster_info = self.nsx_bridge_cluster_info()
|
cluster_info = self.nsx_bridge_cluster_info()
|
||||||
device_name, interface_name = cluster_info[0][0], cluster_info[0][1]
|
device_name, interface_name = cluster_info[0][0], cluster_info[0][1]
|
||||||
l2gw_name = data_utils.rand_name(constants.L2GW)
|
l2gw_name = data_utils.rand_name(constants.L2GW)
|
||||||
@@ -138,7 +137,7 @@ class L2GatewayConnectionNegative(base_l2gw.BaseL2GatewayTest):
|
|||||||
"""
|
"""
|
||||||
Create l2 gateway connection using a non-existent l2gw uuid.
|
Create l2 gateway connection using a non-existent l2gw uuid.
|
||||||
"""
|
"""
|
||||||
LOG.info(_LI("Testing test_l2_gateway_connection_create api"))
|
LOG.info("Testing test_l2_gateway_connection_create api")
|
||||||
non_exist_l2gw_uuid = NON_EXIST_UUID
|
non_exist_l2gw_uuid = NON_EXIST_UUID
|
||||||
cluster_info = self.nsx_bridge_cluster_info()
|
cluster_info = self.nsx_bridge_cluster_info()
|
||||||
device_name, interface_name = cluster_info[0][0], cluster_info[0][1]
|
device_name, interface_name = cluster_info[0][0], cluster_info[0][1]
|
||||||
@@ -160,7 +159,7 @@ class L2GatewayConnectionNegative(base_l2gw.BaseL2GatewayTest):
|
|||||||
"""
|
"""
|
||||||
Create l2 gateway connection using a non-existent network uuid.
|
Create l2 gateway connection using a non-existent network uuid.
|
||||||
"""
|
"""
|
||||||
LOG.info(_LI("Testing test_l2_gateway_connection_create api"))
|
LOG.info("Testing test_l2_gateway_connection_create api")
|
||||||
non_exist_network_uuid = NON_EXIST_UUID
|
non_exist_network_uuid = NON_EXIST_UUID
|
||||||
cluster_info = self.nsx_bridge_cluster_info()
|
cluster_info = self.nsx_bridge_cluster_info()
|
||||||
device_name, interface_name = cluster_info[0][0], cluster_info[0][1]
|
device_name, interface_name = cluster_info[0][0], cluster_info[0][1]
|
||||||
@@ -182,7 +181,7 @@ class L2GatewayConnectionNegative(base_l2gw.BaseL2GatewayTest):
|
|||||||
"""
|
"""
|
||||||
Create l2 gateway connection using invalid seg id.
|
Create l2 gateway connection using invalid seg id.
|
||||||
"""
|
"""
|
||||||
LOG.info(_LI("Testing l2_gateway_create api with segmentation ID"))
|
LOG.info("Testing l2_gateway_create api with segmentation ID")
|
||||||
invalid_seg_id = 20000
|
invalid_seg_id = 20000
|
||||||
cluster_info = self.nsx_bridge_cluster_info()
|
cluster_info = self.nsx_bridge_cluster_info()
|
||||||
device_name, interface_name = cluster_info[0][0], cluster_info[0][1]
|
device_name, interface_name = cluster_info[0][0], cluster_info[0][1]
|
||||||
@@ -201,7 +200,7 @@ class L2GatewayConnectionNegative(base_l2gw.BaseL2GatewayTest):
|
|||||||
"""
|
"""
|
||||||
Create l2 gateway connection using invalid seg id.
|
Create l2 gateway connection using invalid seg id.
|
||||||
"""
|
"""
|
||||||
LOG.info(_LI("Testing l2_gateway_create api with segmentation ID"))
|
LOG.info("Testing l2_gateway_create api with segmentation ID")
|
||||||
invalid_seg_id = 2.45
|
invalid_seg_id = 2.45
|
||||||
cluster_info = self.nsx_bridge_cluster_info()
|
cluster_info = self.nsx_bridge_cluster_info()
|
||||||
device_name, interface_name = cluster_info[0][0], cluster_info[0][1]
|
device_name, interface_name = cluster_info[0][0], cluster_info[0][1]
|
||||||
|
@@ -24,7 +24,6 @@ from tempest.lib.common.utils import data_utils
|
|||||||
from tempest.lib import decorators
|
from tempest.lib import decorators
|
||||||
from tempest import test
|
from tempest import test
|
||||||
|
|
||||||
from vmware_nsx_tempest._i18n import _LI
|
|
||||||
from vmware_nsx_tempest.services import nsxv3_client
|
from vmware_nsx_tempest.services import nsxv3_client
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
@@ -93,7 +92,7 @@ class NSXv3SecGroupTest(base.BaseSecGroupTest):
|
|||||||
group_create_body, name = self._create_security_group()
|
group_create_body, name = self._create_security_group()
|
||||||
secgroup = group_create_body['security_group']
|
secgroup = group_create_body['security_group']
|
||||||
time.sleep(NSX_FIREWALL_REALIZED_DELAY)
|
time.sleep(NSX_FIREWALL_REALIZED_DELAY)
|
||||||
LOG.info(_LI("Create security group with name %(name)s and id %(id)s"),
|
LOG.info("Create security group with name %(name)s and id %(id)s",
|
||||||
{'name': secgroup['name'], 'id': secgroup['id']})
|
{'name': secgroup['name'], 'id': secgroup['id']})
|
||||||
# List security groups and verify if created group is there in response
|
# List security groups and verify if created group is there in response
|
||||||
list_body = self.security_groups_client.list_security_groups()
|
list_body = self.security_groups_client.list_security_groups()
|
||||||
|
@@ -22,7 +22,6 @@ from tempest.lib import decorators
|
|||||||
from tempest.lib import exceptions
|
from tempest.lib import exceptions
|
||||||
|
|
||||||
from tempest import test
|
from tempest import test
|
||||||
from vmware_nsx_tempest._i18n import _LI
|
|
||||||
from vmware_nsx_tempest.common import constants
|
from vmware_nsx_tempest.common import constants
|
||||||
from vmware_nsx_tempest.services import nsxv3_client
|
from vmware_nsx_tempest.services import nsxv3_client
|
||||||
|
|
||||||
@@ -298,8 +297,8 @@ class ProviderSecurityGroupTest(base.BaseAdminNetworkTest):
|
|||||||
if (provider_sg_name in sec_name['display_name'] and
|
if (provider_sg_name in sec_name['display_name'] and
|
||||||
sg_name not in sec_name['display_name']):
|
sg_name not in sec_name['display_name']):
|
||||||
if count == 0:
|
if count == 0:
|
||||||
LOG.info(_LI("Provider group has high priority over"
|
LOG.info("Provider group has high priority over "
|
||||||
"default sec group"))
|
"default sec group")
|
||||||
break
|
break
|
||||||
count += 1
|
count += 1
|
||||||
self.assertIn(provider_sg_name, sec_name['display_name'])
|
self.assertIn(provider_sg_name, sec_name['display_name'])
|
||||||
@@ -316,8 +315,8 @@ class ProviderSecurityGroupTest(base.BaseAdminNetworkTest):
|
|||||||
try:
|
try:
|
||||||
self.delete_security_group(sg_client, sg_id)
|
self.delete_security_group(sg_client, sg_id)
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.info(_LI("Non Admin tenant can't see admin"
|
LOG.info("Non Admin tenant can't see admin "
|
||||||
"provider security group"))
|
"provider security group")
|
||||||
pass
|
pass
|
||||||
|
|
||||||
@test.attr(type='nsxv3')
|
@test.attr(type='nsxv3')
|
||||||
@@ -328,7 +327,7 @@ class ProviderSecurityGroupTest(base.BaseAdminNetworkTest):
|
|||||||
self.create_security_provider_group,
|
self.create_security_provider_group,
|
||||||
self.cmgr_alt, project_id=project_id,
|
self.cmgr_alt, project_id=project_id,
|
||||||
provider=True)
|
provider=True)
|
||||||
LOG.info(_LI("Non-Admin Tenant cannot create provider sec group"))
|
LOG.info("Non-Admin Tenant cannot create provider sec group")
|
||||||
|
|
||||||
@test.attr(type='nsxv3')
|
@test.attr(type='nsxv3')
|
||||||
@decorators.idempotent_id('0d021bb2-9e21-422c-a509-6ac27803b2a2')
|
@decorators.idempotent_id('0d021bb2-9e21-422c-a509-6ac27803b2a2')
|
||||||
|
@@ -22,7 +22,6 @@ from tempest.lib import decorators
|
|||||||
from tempest.scenario import manager
|
from tempest.scenario import manager
|
||||||
from tempest import test
|
from tempest import test
|
||||||
|
|
||||||
from vmware_nsx_tempest._i18n import _LI
|
|
||||||
from vmware_nsx_tempest.common import constants
|
from vmware_nsx_tempest.common import constants
|
||||||
from vmware_nsx_tempest.services import nsxv3_client
|
from vmware_nsx_tempest.services import nsxv3_client
|
||||||
|
|
||||||
@@ -150,7 +149,7 @@ class TestMDProxy(manager.NetworkScenarioTest):
|
|||||||
break
|
break
|
||||||
exec_cmd_retried += 1
|
exec_cmd_retried += 1
|
||||||
time.sleep(constants.INTERVAL_BETWEEN_EXEC_RETRY_ON_SSH)
|
time.sleep(constants.INTERVAL_BETWEEN_EXEC_RETRY_ON_SSH)
|
||||||
LOG.info(_LI("Tried %s times!!!") % exec_cmd_retried)
|
LOG.info("Tried %s times!!!", exec_cmd_retried)
|
||||||
if check_exist_only:
|
if check_exist_only:
|
||||||
return "Verification is successful!"
|
return "Verification is successful!"
|
||||||
msg = ("Failed while verifying metadata on server. Result "
|
msg = ("Failed while verifying metadata on server. Result "
|
||||||
|
@@ -22,8 +22,6 @@ from tempest.lib import decorators
|
|||||||
from tempest.scenario import manager
|
from tempest.scenario import manager
|
||||||
from tempest import test
|
from tempest import test
|
||||||
|
|
||||||
from vmware_nsx_tempest._i18n import _LE
|
|
||||||
|
|
||||||
CONF = config.CONF
|
CONF = config.CONF
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
@@ -153,8 +151,8 @@ class TestMultiHVNetworkOps(manager.NetworkScenarioTest):
|
|||||||
(ssh_source, remote_ip, should_connect),
|
(ssh_source, remote_ip, should_connect),
|
||||||
msg)
|
msg)
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_LE("Unable to access %{dest}s via ssh to "
|
LOG.exception("Unable to access %{dest}s via ssh to "
|
||||||
"floating-ip %{src}s"),
|
"floating-ip %{src}s",
|
||||||
{'dest': remote_ip, 'src': floating_ip})
|
{'dest': remote_ip, 'src': floating_ip})
|
||||||
raise
|
raise
|
||||||
|
|
||||||
|
@@ -24,8 +24,6 @@ from tempest.lib import decorators
|
|||||||
from tempest.scenario import manager
|
from tempest.scenario import manager
|
||||||
from tempest import test
|
from tempest import test
|
||||||
|
|
||||||
from vmware_nsx_tempest._i18n import _LE
|
|
||||||
|
|
||||||
CONF = config.CONF
|
CONF = config.CONF
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
@@ -194,8 +192,8 @@ class TestMultiNetworksOps(manager.NetworkScenarioTest):
|
|||||||
(ssh_source, remote_ip, should_connect),
|
(ssh_source, remote_ip, should_connect),
|
||||||
msg)
|
msg)
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_LE("Unable to access %{dest}s via ssh to "
|
LOG.exception("Unable to access %{dest}s via ssh to "
|
||||||
"floating-ip %{src}s"),
|
"floating-ip %{src}s",
|
||||||
{'dest': remote_ip, 'src': floating_ip})
|
{'dest': remote_ip, 'src': floating_ip})
|
||||||
raise
|
raise
|
||||||
|
|
||||||
|
@@ -24,7 +24,6 @@ from tempest.lib import decorators
|
|||||||
from tempest.scenario import manager
|
from tempest.scenario import manager
|
||||||
from tempest import test
|
from tempest import test
|
||||||
|
|
||||||
from vmware_nsx_tempest._i18n import _LE
|
|
||||||
from vmware_nsx_tempest.services import nsxv3_client
|
from vmware_nsx_tempest.services import nsxv3_client
|
||||||
|
|
||||||
CONF = config.CONF
|
CONF = config.CONF
|
||||||
@@ -213,8 +212,8 @@ class TestRouterNoNATOps(manager.NetworkScenarioTest):
|
|||||||
(ssh_source, remote_ip, should_connect),
|
(ssh_source, remote_ip, should_connect),
|
||||||
msg)
|
msg)
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_LE("Unable to access %{dest}s via ssh to "
|
LOG.exception("Unable to access %{dest}s via ssh to "
|
||||||
"floating-ip %{src}s"),
|
"floating-ip %{src}s",
|
||||||
{'dest': remote_ip, 'src': floating_ip})
|
{'dest': remote_ip, 'src': floating_ip})
|
||||||
raise
|
raise
|
||||||
|
|
||||||
|
@@ -23,8 +23,6 @@ from tempest.lib import decorators
|
|||||||
from tempest.scenario import manager
|
from tempest.scenario import manager
|
||||||
from tempest import test
|
from tempest import test
|
||||||
|
|
||||||
from vmware_nsx_tempest._i18n import _LE
|
|
||||||
|
|
||||||
CONF = config.CONF
|
CONF = config.CONF
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
@@ -248,8 +246,8 @@ class TestMicroSegmentationOps(manager.NetworkScenarioTest):
|
|||||||
(ssh_source, remote_ip, should_connect),
|
(ssh_source, remote_ip, should_connect),
|
||||||
msg)
|
msg)
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_LE("Unable to access %{dest}s via ssh to "
|
LOG.exception("Unable to access %{dest}s via ssh to "
|
||||||
"floating-ip %{src}s"),
|
"floating-ip %{src}s",
|
||||||
{'dest': remote_ip, 'src': floating_ip})
|
{'dest': remote_ip, 'src': floating_ip})
|
||||||
raise
|
raise
|
||||||
|
|
||||||
|