nsxlib refactor - remove cfg usage

Accessing the NSX configuration should be done only outside of nsxlib and the nsxlib tests.

Change-Id: I8dc1079c11212e4d5691c07e88338f49cfa89430
parent 26ac6063af
commit 40e5810d1d
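The recurring pattern in this change is to stop reading cfg.CONF.nsx_v3 inside nsxlib: credentials, timeouts, retries and nsx_api_managers are now passed into the NsxLib, NSXClusteredAPI and NSX3Client constructors, and the retry decorators take max_attempts from the object instead of from the config. Because a decorator written directly on a method is evaluated at class-definition time, before any instance exists, the updated methods wrap their body in an inner function and decorate that instead. The sketch below is a minimal illustration of that pattern, with placeholder names (Example, StaleRevision, _send are illustrative, not vmware_nsx code), assuming the retrying library is installed:

    import retrying


    class StaleRevision(Exception):
        pass


    def retry_upon_exception(exc, delay=500, max_delay=2000, max_attempts=10):
        # Same shape as the nsxlib helper: retry while `exc` keeps being
        # raised, with exponential backoff, up to max_attempts tries in total.
        return retrying.retry(
            retry_on_exception=lambda e: isinstance(e, exc),
            wait_exponential_multiplier=delay,
            wait_exponential_max=max_delay,
            stop_max_attempt_number=max_attempts)


    class Example(object):
        def __init__(self, max_attempts=3):
            # Configuration arrives through the constructor, not cfg.CONF.
            self.max_attempts = max_attempts

        def _send(self, resource, payload):
            # Placeholder for the real backend call that may raise
            # StaleRevision on a 412 Precondition Failed.
            return payload

        def update_resource(self, resource, payload):
            # Decorating an inner function defers decorator evaluation to
            # call time, when self.max_attempts is available.
            @retry_upon_exception(StaleRevision,
                                  max_attempts=self.max_attempts)
            def _do_update():
                return self._send(resource, payload)

            return _do_update()

Usage would look like Example(max_attempts=5).update_resource('ports/1', {'name': 'x'}); the diff below applies the same inner-function trick throughout nsxlib, and the plugin side builds the fully configured NsxLib once via get_nsxlib_wrapper().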
@@ -39,9 +39,10 @@ class NsxLib(security.Security):
http_read_timeout=None,
conn_idle_timeout=None,
http_provider=None,
max_attempts=0):
max_attempts=0,
nsx_api_managers=None):

# TODO(asarfaty): use max_attempts instead of cfg value
self.max_attempts = max_attempts

# create the Cluster
self.cluster = cluster.NSXClusteredAPI(
@@ -52,10 +53,13 @@ class NsxLib(security.Security):
http_timeout=http_timeout,
http_read_timeout=http_read_timeout,
conn_idle_timeout=conn_idle_timeout,
http_provider=http_provider)
http_provider=http_provider,
nsx_api_managers=nsx_api_managers)

# create the Client
self.client = client.NSX3Client(self.cluster)
self.client = client.NSX3Client(
self.cluster,
max_attempts=max_attempts)

super(NsxLib, self).__init__()

@@ -68,12 +72,17 @@ class NsxLib(security.Security):
resource = "edge-clusters/%s" % edge_cluster_uuid
return self.client.get(resource)

@utils.retry_upon_exception(exceptions.StaleRevision)
def update_resource_with_retry(self, resource, payload):
revised_payload = self.client.get(resource)
for key_name in payload.keys():
revised_payload[key_name] = payload[key_name]
return self.client.update(resource, revised_payload)
#Using internal method so we can access max_attempts in the decorator
@utils.retry_upon_exception(exceptions.StaleRevision,
max_attempts=self.max_attempts)
def _do_update():
revised_payload = self.client.get(resource)
for key_name in payload.keys():
revised_payload[key_name] = payload[key_name]
return self.client.update(resource, revised_payload)

return _do_update()

def delete_resource_by_values(self, resource,
skip_not_found=True, **kwargs):
@@ -95,7 +104,7 @@ class NsxLib(security.Security):
"%(values)s") % {'res': resource,
'values': kwargs})
raise exceptions.ResourceNotFound(
manager=client._get_nsx_managers_from_conf(),
manager=self.cluster.nsx_api_managers,
operation=err_msg)
elif matched_num > 1:
LOG.warning(_LW("%(num)s resources in %(res)s matched for values: "
@@ -126,30 +135,41 @@ class NsxLib(security.Security):

return self.client.create(resource, body)

@utils.retry_upon_exception(exceptions.StaleRevision)
def delete_logical_switch(self, lswitch_id):
resource = 'logical-switches/%s?detach=true&cascade=true' % lswitch_id
self.client.delete(resource)
#Using internal method so we can access max_attempts in the decorator
@utils.retry_upon_exception(exceptions.StaleRevision,
max_attempts=self.max_attempts)
def _do_delete():
resource = ('logical-switches/%s?detach=true&cascade=true' %
lswitch_id)
self.client.delete(resource)

_do_delete()

def get_logical_switch(self, logical_switch_id):
resource = "logical-switches/%s" % logical_switch_id
return self.client.get(resource)

@utils.retry_upon_exception(exceptions.StaleRevision)
def update_logical_switch(self, lswitch_id, name=None, admin_state=None,
tags=None):
resource = "logical-switches/%s" % lswitch_id
lswitch = self.get_logical_switch(lswitch_id)
if name is not None:
lswitch['display_name'] = name
if admin_state is not None:
if admin_state:
lswitch['admin_state'] = nsx_constants.ADMIN_STATE_UP
else:
lswitch['admin_state'] = nsx_constants.ADMIN_STATE_DOWN
if tags is not None:
lswitch['tags'] = tags
return self.client.update(resource, lswitch)
#Using internal method so we can access max_attempts in the decorator
@utils.retry_upon_exception(exceptions.StaleRevision,
max_attempts=self.max_attempts)
def _do_update():
resource = "logical-switches/%s" % lswitch_id
lswitch = self.get_logical_switch(lswitch_id)
if name is not None:
lswitch['display_name'] = name
if admin_state is not None:
if admin_state:
lswitch['admin_state'] = nsx_constants.ADMIN_STATE_UP
else:
lswitch['admin_state'] = nsx_constants.ADMIN_STATE_DOWN
if tags is not None:
lswitch['tags'] = tags
return self.client.update(resource, lswitch)

return _do_update()

def add_nat_rule(self, logical_router_id, action, translated_network,
source_net=None, dest_net=None,
@@ -16,7 +16,6 @@
import requests
import six.moves.urllib.parse as urlparse

from oslo_config import cfg
from oslo_log import log
from oslo_serialization import jsonutils
from vmware_nsx._i18n import _, _LW
@@ -26,6 +25,7 @@ LOG = log.getLogger(__name__)

ERRORS = {requests.codes.NOT_FOUND: exceptions.ResourceNotFound,
requests.codes.PRECONDITION_FAILED: exceptions.StaleRevision}
DEFAULT_ERROR = exceptions.ManagerError


class RESTClient(object):
@@ -81,6 +81,10 @@ class RESTClient(object):
def url_post(self, url, body, headers=None):
return self._rest_call(url, method='POST', body=body, headers=headers)

def _raise_error(self, status_code, operation, result_msg):
error = ERRORS.get(status_code, DEFAULT_ERROR)
raise error(manager='', operation=operation, details=result_msg)

def _validate_result(self, result, expected, operation):
if result.status_code not in expected:
result_msg = result.json() if result.content else ''
@@ -92,8 +96,6 @@ class RESTClient(object):
for code in expected]),
'body': result_msg})

manager_error = ERRORS.get(
result.status_code, exceptions.ManagerError)
if isinstance(result_msg, dict) and 'error_message' in result_msg:
related_errors = [error['error_message'] for error in
result_msg.get('related_errors', [])]
@@ -101,10 +103,7 @@ class RESTClient(object):
if related_errors:
result_msg += " relatedErrors: %s" % ' '.join(
related_errors)
raise manager_error(
manager=_get_nsx_managers_from_conf(),
operation=operation,
details=result_msg)
self._raise_error(result.status_code, operation, result_msg)

@classmethod
def merge_headers(cls, *headers):
@@ -175,7 +174,11 @@ class NSX3Client(JSONRESTClient):
_NSX_V1_API_PREFIX = 'api/v1/'

def __init__(self, connection, url_prefix=None,
default_headers=None):
default_headers=None,
nsx_api_managers=None,
max_attempts=0):

self.nsx_api_managers = nsx_api_managers or []

url_prefix = url_prefix or NSX3Client._NSX_V1_API_PREFIX
if url_prefix and NSX3Client._NSX_V1_API_PREFIX not in url_prefix:
@@ -184,52 +187,15 @@ class NSX3Client(JSONRESTClient):
else:
url_prefix = "%s/%s" % (NSX3Client._NSX_V1_API_PREFIX,
url_prefix or '')
self.max_attempts = max_attempts

super(NSX3Client, self).__init__(
connection, url_prefix=url_prefix,
default_headers=default_headers)


# TODO(boden): remove mod level fns and vars below
_DEFAULT_API_CLUSTER = None


def _get_default_api_cluster():
global _DEFAULT_API_CLUSTER
if _DEFAULT_API_CLUSTER is None:
# removes circular ref between client / cluster
import vmware_nsx.nsxlib.v3.cluster as nsx_cluster
_DEFAULT_API_CLUSTER = nsx_cluster.NSXClusteredAPI()
return _DEFAULT_API_CLUSTER


def _set_default_api_cluster(cluster):
global _DEFAULT_API_CLUSTER
old = _DEFAULT_API_CLUSTER
_DEFAULT_API_CLUSTER = cluster
return old


def _get_client(client):
return client or NSX3Client(_get_default_api_cluster())


# NOTE(shihli): tmp until all refs use client class
def _get_nsx_managers_from_conf():
return cfg.CONF.nsx_v3.nsx_api_managers


def get_resource(resource, client=None):
return _get_client(client).get(resource)


def create_resource(resource, data, client=None):
return _get_client(client).url_post(resource, body=data)


def update_resource(resource, data, client=None):
return _get_client(client).update(resource, body=data)


def delete_resource(resource, client=None):
return _get_client(client).delete(resource)
def _raise_error(self, status_code, operation, result_msg):
"""Override the Rest client errors to add the manager IPs"""
error = ERRORS.get(status_code, DEFAULT_ERROR)
raise error(manager=self.nsx_api_managers,
operation=operation,
details=result_msg)
@@ -29,7 +29,6 @@ from eventlet import pools
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.callbacks import resources
from oslo_config import cfg
from oslo_log import log
from oslo_service import loopingcall
from requests import adapters
@@ -89,9 +88,9 @@ class TimeoutSession(requests.Session):
at the session level.
"""

def __init__(self, timeout=None, read_timeout=None):
self.timeout = timeout or cfg.CONF.nsx_v3.http_timeout
self.read_timeout = read_timeout or cfg.CONF.nsx_v3.http_read_timeout
def __init__(self, timeout, read_timeout):
self.timeout = timeout
self.read_timeout = read_timeout
super(TimeoutSession, self).__init__()

# wrapper timeouts at the session level
@@ -461,23 +460,22 @@ class NSXClusteredAPI(ClusteredAPI):
http_timeout=None,
http_read_timeout=None,
conn_idle_timeout=None,
http_provider=None):
self.retries = retries or cfg.CONF.nsx_v3.http_retries
self.insecure = insecure or cfg.CONF.nsx_v3.insecure
http_provider=None,
nsx_api_managers=None):

# username, password & ca_file may be lists, in order to support
# different credentials per nsx manager
self._username = username or cfg.CONF.nsx_v3.nsx_api_user
self._password = password or cfg.CONF.nsx_v3.nsx_api_password
self._ca_file = ca_file or cfg.CONF.nsx_v3.ca_file
self._username = username
self._password = password
self._ca_file = ca_file

self.conns_per_pool = (concurrent_connections or
cfg.CONF.nsx_v3.concurrent_connections)
self.http_timeout = http_timeout or cfg.CONF.nsx_v3.http_timeout
self.http_read_timeout = (http_read_timeout or
cfg.CONF.nsx_v3.http_read_timeout)
self.conn_idle_timeout = (conn_idle_timeout or
cfg.CONF.nsx_v3.conn_idle_timeout)
self.retries = retries
self.insecure = insecure
self.conns_per_pool = concurrent_connections
self.http_timeout = http_timeout
self.http_read_timeout = http_read_timeout
self.conn_idle_timeout = conn_idle_timeout
self.nsx_api_managers = nsx_api_managers

self._http_provider = http_provider or NSXRequestsHTTPProvider()

@@ -498,7 +496,7 @@ class NSXClusteredAPI(ClusteredAPI):
uri if uri.startswith('http') else
"%s://%s" % (self._http_provider.default_scheme, uri))

conf_urls = cfg.CONF.nsx_v3.nsx_api_managers[:]
conf_urls = self.nsx_api_managers[:]
urls = []
providers = []
provider_index = -1
@@ -16,18 +16,18 @@
import netaddr
from neutron_lib.api import validators
from neutron_lib import constants
from oslo_config import cfg

from vmware_nsx.nsxlib.v3 import utils


def build_dhcp_server_config(network, subnet, port, project_name):
# Prepare the configutation for a new logical DHCP server.
def build_dhcp_server_config(network, subnet, port, project_name,
nameservers, dhcp_profile_uuid, dns_domain):
# Prepare the configuration for a new logical DHCP server.
server_ip = "%s/%u" % (port['fixed_ips'][0]['ip_address'],
netaddr.IPNetwork(subnet['cidr']).prefixlen)
dns_nameservers = subnet['dns_nameservers']
if not dns_nameservers or not validators.is_attr_set(dns_nameservers):
dns_nameservers = cfg.CONF.nsx_v3.nameservers
dns_nameservers = nameservers
gateway_ip = subnet['gateway_ip']
if not validators.is_attr_set(gateway_ip):
gateway_ip = None
@@ -56,10 +56,10 @@ def build_dhcp_server_config(network, subnet, port, project_name):
tags = utils.build_v3_tags_payload(
network, resource_type='os-neutron-net-id', project_name=project_name)
return {'name': name,
'dhcp_profile_id': cfg.CONF.nsx_v3.dhcp_profile_uuid,
'dhcp_profile_id': dhcp_profile_uuid,
'server_ip': server_ip,
'dns_nameservers': dns_nameservers,
'domain_name': cfg.CONF.nsx_v3.dns_domain,
'domain_name': dns_domain,
'gateway_ip': gateway_ip,
'options': options,
'tags': tags}
@@ -17,11 +17,9 @@

import uuid

from oslo_config import cfg
from oslo_log import log

from vmware_nsx._i18n import _, _LW
from vmware_nsx.nsxlib import v3
from vmware_nsx.nsxlib.v3 import exceptions
from vmware_nsx.nsxlib.v3 import nsx_constants as consts
from vmware_nsx.nsxlib.v3 import utils as nsxlib_utils
@@ -51,21 +49,8 @@ class NSGroupManager(object):
NESTED_GROUP_NAME = 'OS Nested Group'
NESTED_GROUP_DESCRIPTION = ('OpenStack NSGroup. Do not delete.')

def __init__(self, size):
# TODO(asarfaty): integrate this in a better way..
self.nsx = v3.NsxLib(
username=cfg.CONF.nsx_v3.nsx_api_user,
password=cfg.CONF.nsx_v3.nsx_api_password,
retries=cfg.CONF.nsx_v3.http_retries,
insecure=cfg.CONF.nsx_v3.insecure,
ca_file=cfg.CONF.nsx_v3.ca_file,
concurrent_connections=cfg.CONF.nsx_v3.concurrent_connections,
http_timeout=cfg.CONF.nsx_v3.http_timeout,
http_read_timeout=cfg.CONF.nsx_v3.http_read_timeout,
conn_idle_timeout=cfg.CONF.nsx_v3.conn_idle_timeout,
http_provider=None,
max_attempts=cfg.CONF.nsx_v3.retries)

def __init__(self, nsxlib, size):
self.nsx = nsxlib
self._nested_groups = self._init_nested_groups(size)
self._size = len(self._nested_groups)

@@ -17,10 +17,7 @@ import abc
import collections
import six

from oslo_config import cfg

from vmware_nsx._i18n import _
from vmware_nsx.nsxlib.v3 import client
from vmware_nsx.nsxlib.v3 import exceptions
from vmware_nsx.nsxlib.v3 import nsx_constants
from vmware_nsx.nsxlib.v3 import utils
@@ -285,40 +282,47 @@ class LogicalPort(AbstractRESTResource):
attachment=attachment))
return self._client.create(body=body)

@utils.retry_upon_exception(
exceptions.StaleRevision,
max_attempts=cfg.CONF.nsx_v3.retries)
def delete(self, lport_id):
return self._client.url_delete('%s?detach=true' % lport_id)
#Using internal method so we can access max_attempts in the decorator
@utils.retry_upon_exception(
exceptions.StaleRevision,
max_attempts=self._client.max_attempts)
def _do_delete():
return self._client.url_delete('%s?detach=true' % lport_id)

return _do_delete()

@utils.retry_upon_exception(
exceptions.StaleRevision,
max_attempts=cfg.CONF.nsx_v3.retries)
def update(self, lport_id, vif_uuid,
name=None, admin_state=None,
address_bindings=None, switch_profile_ids=None,
tags_update=None,
attachment_type=nsx_constants.ATTACHMENT_VIF,
parent_vif_id=None, parent_tag=None):
lport = self.get(lport_id)
tags = lport.get('tags', [])
if tags_update:
tags = utils.update_v3_tags(tags, tags_update)
attachment = self._prepare_attachment(vif_uuid, parent_vif_id,
parent_tag, address_bindings,
attachment_type)
lport.update(self._build_body_attrs(
display_name=name,
admin_state=admin_state, tags=tags,
address_bindings=address_bindings,
switch_profile_ids=switch_profile_ids,
attachment=attachment))
#Using internal method so we can access max_attempts in the decorator
@utils.retry_upon_exception(
exceptions.StaleRevision,
max_attempts=self._client.max_attempts)
def do_update():
lport = self.get(lport_id)
tags = lport.get('tags', [])
if tags_update:
tags = utils.update_v3_tags(tags, tags_update)
attachment = self._prepare_attachment(vif_uuid, parent_vif_id,
parent_tag, address_bindings,
attachment_type)
lport.update(self._build_body_attrs(
display_name=name,
admin_state=admin_state, tags=tags,
address_bindings=address_bindings,
switch_profile_ids=switch_profile_ids,
attachment=attachment))

# If revision_id of the payload that we send is older than what NSX has
# then we will get a 412: Precondition Failed. In that case we need to
# re-fetch, patch the response and send it again with the
# new revision_id
return self._client.update(lport_id, body=lport)
# If revision_id of the payload that we send is older than what
# NSX has, we will get a 412: Precondition Failed.
# In that case we need to re-fetch, patch the response and send
# it again with the new revision_id
return self._client.update(lport_id, body=lport)
return do_update()


class LogicalRouter(AbstractRESTResource):
@@ -342,18 +346,22 @@ class LogicalRouter(AbstractRESTResource):
def delete(self, lrouter_id):
return self._client.url_delete(lrouter_id)

@utils.retry_upon_exception(
exceptions.StaleRevision,
max_attempts=cfg.CONF.nsx_v3.retries)
def update(self, lrouter_id, *args, **kwargs):
lrouter = self.get(lrouter_id)
for k in kwargs:
lrouter[k] = kwargs[k]
# If revision_id of the payload that we send is older than what NSX has
# then we will get a 412: Precondition Failed. In that case we need to
# re-fetch, patch the response and send it again with the
# new revision_id
return self._client.update(lrouter_id, body=lrouter)
#Using internal method so we can access max_attempts in the decorator
@utils.retry_upon_exception(
exceptions.StaleRevision,
max_attempts=self._client.max_attempts)
def _do_update():
lrouter = self.get(lrouter_id)
for k in kwargs:
lrouter[k] = kwargs[k]
# If revision_id of the payload that we send is older than what
# NSX has, we will get a 412: Precondition Failed.
# In that case we need to re-fetch, patch the response and send
# it again with the new revision_id
return self._client.update(lrouter_id, body=lrouter)

return _do_update()


class LogicalRouterPort(AbstractRESTResource):
@@ -389,24 +397,32 @@ class LogicalRouterPort(AbstractRESTResource):

return self._client.create(body=body)

@utils.retry_upon_exception(
exceptions.StaleRevision,
max_attempts=cfg.CONF.nsx_v3.retries)
def update(self, logical_port_id, **kwargs):
logical_router_port = self.get(logical_port_id)
for k in kwargs:
logical_router_port[k] = kwargs[k]
# If revision_id of the payload that we send is older than what NSX has
# then we will get a 412: Precondition Failed. In that case we need to
# re-fetch, patch the response and send it again with the
# new revision_id
return self._client.update(logical_port_id, body=logical_router_port)
#Using internal method so we can access max_attempts in the decorator
@utils.retry_upon_exception(
exceptions.StaleRevision,
max_attempts=self._client.max_attempts)
def _do_update():
logical_router_port = self.get(logical_port_id)
for k in kwargs:
logical_router_port[k] = kwargs[k]
# If revision_id of the payload that we send is older than what
# NSX has, we will get a 412: Precondition Failed.
# In that case we need to re-fetch, patch the response and send
# it again with the new revision_id
return self._client.update(logical_port_id,
body=logical_router_port)
return _do_update()

@utils.retry_upon_exception(
exceptions.StaleRevision,
max_attempts=cfg.CONF.nsx_v3.retries)
def delete(self, logical_port_id):
return self._client.url_delete(logical_port_id)
#Using internal method so we can access max_attempts in the decorator
@utils.retry_upon_exception(
exceptions.StaleRevision,
max_attempts=self._client.max_attempts)
def _do_delete():
return self._client.url_delete(logical_port_id)

return _do_delete()

def get_by_lswitch_id(self, logical_switch_id):
resource = '?logical_switch_id=%s' % logical_switch_id
@@ -422,7 +438,7 @@ class LogicalRouterPort(AbstractRESTResource):
err_msg = (_("Logical router link port not found on logical "
"switch %s") % logical_switch_id)
raise exceptions.ResourceNotFound(
manager=client._get_nsx_managers_from_conf(),
manager=self._client.nsx_api_managers,
operation=err_msg)

def update_by_lswitch_id(self, logical_router_id, ls_id, **payload):
@@ -444,7 +460,7 @@ class LogicalRouterPort(AbstractRESTResource):
if port['resource_type'] == nsx_constants.LROUTERPORT_LINKONTIER1:
return port
raise exceptions.ResourceNotFound(
manager=client._get_nsx_managers_from_conf(),
manager=self._client.nsx_api_managers,
operation="get router link port")


@@ -511,17 +527,21 @@ class LogicalDhcpServer(AbstractRESTResource):
options, tags)
return self._client.create(body=body)

@utils.retry_upon_exception(
exceptions.StaleRevision,
max_attempts=cfg.CONF.nsx_v3.retries)
def update(self, uuid, dhcp_profile_id=None, server_ip=None, name=None,
dns_nameservers=None, domain_name=None, gateway_ip=False,
options=None, tags=None):
body = self._client.get(uuid)
self._construct_server(body, dhcp_profile_id, server_ip, name,
dns_nameservers, domain_name, gateway_ip,
options, tags)
return self._client.update(uuid, body=body)
#Using internal method so we can access max_attempts in the decorator
@utils.retry_upon_exception(
exceptions.StaleRevision,
max_attempts=self._client.max_attempts)
def _do_update():
body = self._client.get(uuid)
self._construct_server(body, dhcp_profile_id, server_ip, name,
dns_nameservers, domain_name, gateway_ip,
options, tags)
return self._client.update(uuid, body=body)

return _do_update()

def create_binding(self, server_uuid, mac, ip, hostname=None,
lease_time=None, options=None):
@@ -539,14 +559,18 @@ class LogicalDhcpServer(AbstractRESTResource):
url = "%s/static-bindings/%s" % (server_uuid, binding_uuid)
return self._client.url_get(url)

@utils.retry_upon_exception(
exceptions.StaleRevision,
max_attempts=cfg.CONF.nsx_v3.retries)
def update_binding(self, server_uuid, binding_uuid, **kwargs):
body = self.get_binding(server_uuid, binding_uuid)
body.update(kwargs)
url = "%s/static-bindings/%s" % (server_uuid, binding_uuid)
return self._client.url_put(url, body)
#Using internal method so we can access max_attempts in the decorator
@utils.retry_upon_exception(
exceptions.StaleRevision,
max_attempts=self._client.max_attempts)
def _do_update():
body = self.get_binding(server_uuid, binding_uuid)
body.update(kwargs)
url = "%s/static-bindings/%s" % (server_uuid, binding_uuid)
return self._client.url_put(url, body)

return _do_update()

def delete_binding(self, server_uuid, binding_uuid):
url = "%s/static-bindings/%s" % (server_uuid, binding_uuid)
@@ -23,7 +23,6 @@ from oslo_log import log
from oslo_utils import excutils

from vmware_nsx._i18n import _LE, _LW
from vmware_nsx.nsxlib.v3 import client as nsxclient
from vmware_nsx.nsxlib.v3 import exceptions
from vmware_nsx.nsxlib.v3 import nsx_constants as consts
from vmware_nsx.nsxlib.v3 import utils
@@ -286,20 +285,26 @@ class Security(object):
return self.client.get(
'ns-groups?populate_references=false').get('results', [])

@utils.retry_upon_exception(exceptions.StaleRevision)
def update_nsgroup(self, nsgroup_id, display_name=None, description=None,
membership_criteria=None, members=None):
nsgroup = self.read_nsgroup(nsgroup_id)
if display_name is not None:
nsgroup['display_name'] = display_name
if description is not None:
nsgroup['description'] = description
if members is not None:
nsgroup['members'] = members
if membership_criteria is not None:
nsgroup['membership_criteria'] = [membership_criteria]
return self.client.update(
'ns-groups/%s' % nsgroup_id, nsgroup)
#Using internal method so we can access max_attempts in the decorator
@utils.retry_upon_exception(
exceptions.StaleRevision,
max_attempts=self.max_attempts)
def _do_update():
nsgroup = self.read_nsgroup(nsgroup_id)
if display_name is not None:
nsgroup['display_name'] = display_name
if description is not None:
nsgroup['description'] = description
if members is not None:
nsgroup['members'] = members
if membership_criteria is not None:
nsgroup['membership_criteria'] = [membership_criteria]
return self.client.update(
'ns-groups/%s' % nsgroup_id, nsgroup)

return _do_update()

def get_nsgroup_member_expression(self, target_type, target_id):
return {
@@ -309,10 +314,16 @@ class Security(object):
'op': consts.EQUALS,
'value': target_id}

@utils.retry_upon_exception(exceptions.ManagerError)
def _update_nsgroup_with_members(self, nsgroup_id, members, action):
members_update = 'ns-groups/%s?action=%s' % (nsgroup_id, action)
return self.client.create(members_update, members)
#Using internal method so we can access max_attempts in the decorator
@utils.retry_upon_exception(
exceptions.StaleRevision,
max_attempts=self.max_attempts)
def _do_update():
members_update = 'ns-groups/%s?action=%s' % (nsgroup_id, action)
return self.client.create(members_update, members)

return _do_update()

def add_nsgroup_members(self, nsgroup_id, target_type, target_ids):
members = []
@@ -383,27 +394,33 @@ class Security(object):
resource += '&id=%s' % other_section
return self.client.create(resource, body)

@utils.retry_upon_exception(exceptions.StaleRevision)
def update_section(self, section_id, display_name=None, description=None,
applied_tos=None, rules=None):
resource = 'firewall/sections/%s' % section_id
section = self.read_section(section_id)
#Using internal method so we can access max_attempts in the decorator
@utils.retry_upon_exception(
exceptions.StaleRevision,
max_attempts=self.max_attempts)
def _do_update():
resource = 'firewall/sections/%s' % section_id
section = self.read_section(section_id)

if rules is not None:
resource += '?action=update_with_rules'
section.update({'rules': rules})
if display_name is not None:
section['display_name'] = display_name
if description is not None:
section['description'] = description
if applied_tos is not None:
section['applied_tos'] = [self.get_nsgroup_reference(nsg_id)
for nsg_id in applied_tos]
if rules is not None:
return nsxclient.create_resource(resource, section)
elif any(p is not None for p in (display_name, description,
applied_tos)):
return self.client.update(resource, section)
if rules is not None:
resource += '?action=update_with_rules'
section.update({'rules': rules})
if display_name is not None:
section['display_name'] = display_name
if description is not None:
section['description'] = description
if applied_tos is not None:
section['applied_tos'] = [self.get_nsgroup_reference(nsg_id)
for nsg_id in applied_tos]
if rules is not None:
return self.client.create(resource, section)
elif any(p is not None for p in (display_name, description,
applied_tos)):
return self.client.update(resource, section)

return _do_update()

def read_section(self, section_id):
resource = 'firewall/sections/%s' % section_id
@@ -27,6 +27,7 @@ MAX_RESOURCE_TYPE_LEN = 20
MAX_TAG_LEN = 40
NSX_NEUTRON_PLUGIN = 'NSX Neutron plugin'
OS_NEUTRON_ID_SCOPE = 'os-neutron-id'
DEFAULT_MAX_ATTEMPTS = 10


def is_internal_resource(nsx_resource):
@@ -111,9 +112,8 @@ def update_v3_tags(current_tags, tags_update):
return tags


#Todo(asarfaty): figure out a way to use an NsxLib class variable in the
#retry decorator instead of the configuration/constant one
def retry_upon_exception(exc, delay=500, max_delay=2000, max_attempts=10):
def retry_upon_exception(exc, delay=500, max_delay=2000,
max_attempts=DEFAULT_MAX_ATTEMPTS):
return retrying.retry(retry_on_exception=lambda e: isinstance(e, exc),
wait_exponential_multiplier=delay,
wait_exponential_max=max_delay,
@@ -83,7 +83,6 @@ from vmware_nsx.extensions import advancedserviceproviders as as_providers
from vmware_nsx.extensions import maclearning as mac_ext
from vmware_nsx.extensions import providersecuritygroup as provider_sg
from vmware_nsx.extensions import securitygrouplogging as sg_logging
from vmware_nsx.nsxlib import v3 as nsxlib
from vmware_nsx.nsxlib.v3 import exceptions as nsx_lib_exc
from vmware_nsx.nsxlib.v3 import native_dhcp
from vmware_nsx.nsxlib.v3 import ns_group_manager
@@ -92,6 +91,7 @@ from vmware_nsx.nsxlib.v3 import resources as nsx_resources
from vmware_nsx.nsxlib.v3 import router
from vmware_nsx.nsxlib.v3 import security
from vmware_nsx.nsxlib.v3 import utils as nsxlib_utils
from vmware_nsx.plugins.nsx_v3 import utils as v3_utils
from vmware_nsx.services.qos.common import utils as qos_com_utils
from vmware_nsx.services.qos.nsx_v3 import utils as qos_utils
from vmware_nsx.services.trunk.nsx_v3 import driver as trunk_driver
@@ -165,18 +165,7 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
super(NsxV3Plugin, self).__init__()
LOG.info(_LI("Starting NsxV3Plugin"))

self.nsxlib = nsxlib.NsxLib(
username=cfg.CONF.nsx_v3.nsx_api_user,
password=cfg.CONF.nsx_v3.nsx_api_password,
retries=cfg.CONF.nsx_v3.http_retries,
insecure=cfg.CONF.nsx_v3.insecure,
ca_file=cfg.CONF.nsx_v3.ca_file,
concurrent_connections=cfg.CONF.nsx_v3.concurrent_connections,
http_timeout=cfg.CONF.nsx_v3.http_timeout,
http_read_timeout=cfg.CONF.nsx_v3.http_read_timeout,
conn_idle_timeout=cfg.CONF.nsx_v3.conn_idle_timeout,
http_provider=None,
max_attempts=cfg.CONF.nsx_v3.retries)
self.nsxlib = v3_utils.get_nsxlib_wrapper()

self._nsx_version = self.nsxlib.get_version()
LOG.info(_LI("NSX Version: %s"), self._nsx_version)
@@ -416,7 +405,7 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
def _init_nsgroup_manager_and_default_section_rules(self):
with locking.LockManager.get_lock('nsxv3_nsgroup_manager_init'):
nsgroup_manager = ns_group_manager.NSGroupManager(
cfg.CONF.nsx_v3.number_of_nested_groups)
self.nsxlib, cfg.CONF.nsx_v3.number_of_nested_groups)
section_description = ("This section is handled by OpenStack to "
"contain default rules on security-groups.")
section_id = self.nsxlib.init_default_section(
@@ -927,7 +916,10 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
neutron_port = super(NsxV3Plugin, self).create_port(
context, {'port': port_data})
server_data = native_dhcp.build_dhcp_server_config(
network, subnet, neutron_port, context.tenant_name)
network, subnet, neutron_port, context.tenant_name,
cfg.CONF.nsx_v3.nameservers,
cfg.CONF.nsx_v3.dhcp_profile_uuid,
cfg.CONF.nsx_v3.dns_domain)
nsx_net_id = self._get_network_nsx_id(context, network['id'])
tags = nsxlib_utils.build_v3_tags_payload(
neutron_port, resource_type='os-neutron-dport-id',
vmware_nsx/plugins/nsx_v3/utils.py (new file, 33 lines)
@@ -0,0 +1,33 @@
# Copyright 2016 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg

from vmware_nsx.nsxlib import v3


def get_nsxlib_wrapper():
    return v3.NsxLib(
        username=cfg.CONF.nsx_v3.nsx_api_user,
        password=cfg.CONF.nsx_v3.nsx_api_password,
        retries=cfg.CONF.nsx_v3.http_retries,
        insecure=cfg.CONF.nsx_v3.insecure,
        ca_file=cfg.CONF.nsx_v3.ca_file,
        concurrent_connections=cfg.CONF.nsx_v3.concurrent_connections,
        http_timeout=cfg.CONF.nsx_v3.http_timeout,
        http_read_timeout=cfg.CONF.nsx_v3.http_read_timeout,
        conn_idle_timeout=cfg.CONF.nsx_v3.conn_idle_timeout,
        http_provider=None,
        max_attempts=cfg.CONF.nsx_v3.retries,
        nsx_api_managers=cfg.CONF.nsx_v3.nsx_api_managers)
@@ -32,6 +32,7 @@ from vmware_nsx.nsxlib import v3 as nsxlib
from vmware_nsx.nsxlib.v3 import exceptions as nsxlib_exc
from vmware_nsx.nsxlib.v3 import resources as nsx_resources
from vmware_nsx.nsxlib.v3 import utils as nsxlib_utils
from vmware_nsx.plugins.nsx_v3 import utils as v3_utils

LOG = logging.getLogger(__name__)

@@ -279,13 +280,14 @@ class NsxV3Driver(base_driver.TaasBaseDriver,
context._plugin_context.session, dest_port_id)
# Create port mirror session on the backend
try:
pm_session = nsxlib.NsxLib().create_port_mirror_session(
source_ports=nsx_src_ports,
dest_ports=nsx_dest_ports,
direction=direction,
description=tf.get('description'),
name=tf.get('name'),
tags=tags)
nsxlib = v3_utils.get_nsxlib_wrapper()
pm_session = nsxlib.create_port_mirror_session(
source_ports=nsx_src_ports,
dest_ports=nsx_dest_ports,
direction=direction,
description=tf.get('description'),
name=tf.get('name'),
tags=tags)
except nsxlib_exc.ManagerError:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Unable to create port mirror session %s "
@@ -305,7 +307,8 @@ class NsxV3Driver(base_driver.TaasBaseDriver,
LOG.error(_LE("Unable to create port mirror session db "
"mappings for tap flow %s. Rolling back "
"changes in Neutron."), tf['id'])
nsxlib.NsxLib().delete_port_mirror_session(pm_session['id'])
nsxlib.delete_port_mirror_session(
pm_session['id'])

def delete_tap_flow_precommit(self, context):
pass
@@ -89,7 +89,10 @@ def nsx_update_dhcp_bindings(resource, event, trigger, **kwargs):
# logical port of the Neutron DHCP port.
network = neutron_client.get_network(port['network_id'])
server_data = native_dhcp.build_dhcp_server_config(
network, subnet, port, 'admin')
network, subnet, port, 'admin',
cfg.CONF.nsx_v3.nameservers,
cfg.CONF.nsx_v3.dhcp_profile_uuid,
cfg.CONF.nsx_v3.dns_domain)
dhcp_server = dhcp_server_resource.create(**server_data)
LOG.info(_LI("Created logical DHCP server %(server)s for "
"network %(network)s"),
@@ -15,12 +15,11 @@

from neutron import context
from neutron.db import db_base_plugin_v2
from oslo_config import cfg

from vmware_nsx.db import db as nsx_db
from vmware_nsx.nsxlib import v3
from vmware_nsx.nsxlib.v3 import nsx_constants
from vmware_nsx.plugins.nsx_v3 import plugin
from vmware_nsx.plugins.nsx_v3 import utils as v3_utils


def get_nsxv3_client():
@@ -28,18 +27,7 @@ def get_nsxv3_client():


def get_connected_nsxlib():
return v3.NsxLib(
username=cfg.CONF.nsx_v3.nsx_api_user,
password=cfg.CONF.nsx_v3.nsx_api_password,
retries=cfg.CONF.nsx_v3.http_retries,
insecure=cfg.CONF.nsx_v3.insecure,
ca_file=cfg.CONF.nsx_v3.ca_file,
concurrent_connections=cfg.CONF.nsx_v3.concurrent_connections,
http_timeout=cfg.CONF.nsx_v3.http_timeout,
http_read_timeout=cfg.CONF.nsx_v3.http_read_timeout,
conn_idle_timeout=cfg.CONF.nsx_v3.conn_idle_timeout,
http_provider=None,
max_attempts=cfg.CONF.nsx_v3.retries)
return v3_utils.get_nsxlib_wrapper()


class NeutronDbClient(db_base_plugin_v2.NeutronDbPluginV2):
@@ -190,7 +190,7 @@ class TestNSGroupManager(nsxlib_testcase.NsxLibTestCase):
@_mock_create_and_list_nsgroups
def test_first_initialization(self):
size = 5
cont_manager = ns_group_manager.NSGroupManager(size)
cont_manager = ns_group_manager.NSGroupManager(self.nsxlib, size)
nested_groups = cont_manager.nested_groups
self.assertEqual({i: NSG_IDS[i] for i in range(size)},
nested_groups)
@@ -204,11 +204,12 @@ class TestNSGroupManager(nsxlib_testcase.NsxLibTestCase):

size = 2
# Creates 2 nested groups.
ns_group_manager.NSGroupManager(size)
ns_group_manager.NSGroupManager(self.nsxlib, size)

size = 5
# Creates another 3 nested groups.
nested_groups = ns_group_manager.NSGroupManager(size).nested_groups
nested_groups = ns_group_manager.NSGroupManager(
self.nsxlib, size).nested_groups
self.assertEqual({i: NSG_IDS[i] for i in range(size)},
nested_groups)

@@ -222,7 +223,7 @@ class TestNSGroupManager(nsxlib_testcase.NsxLibTestCase):
# according to its id and the number of nested groups.

size = 5
cont_manager = ns_group_manager.NSGroupManager(size)
cont_manager = ns_group_manager.NSGroupManager(self.nsxlib, size)
nsgroup_id = 'nsgroup_id'

with mock.patch.object(cont_manager, '_hash_uuid', return_value=7):
@@ -257,7 +258,7 @@ class TestNSGroupManager(nsxlib_testcase.NsxLibTestCase):
remove_member_mock.side_effect = _remove_member_mock

size = 5
cont_manager = ns_group_manager.NSGroupManager(size)
cont_manager = ns_group_manager.NSGroupManager(self.nsxlib, size)
nsgroup_id = 'nsgroup_id'

with mock.patch.object(cont_manager, '_hash_uuid', return_value=7):
@@ -292,7 +293,7 @@ class TestNSGroupManager(nsxlib_testcase.NsxLibTestCase):
add_member_mock,
remove_member_mock):
size = 3
cont_manager = ns_group_manager.NSGroupManager(size)
cont_manager = ns_group_manager.NSGroupManager(self.nsxlib, size)
# list_nsgroups will return nested group 1 and 3, but not group 2.
# FIXME: Not sure what this mock does. no one calls this method now.
#with mock.patch.object(vmware_nsx.nsxlib.v3.NsxLib.list_nsgroups,
@@ -300,7 +301,6 @@ class TestNSGroupManager(nsxlib_testcase.NsxLibTestCase):
# list_nsgroups_mock = lambda: list_nsgroups_mock()[::2]
# invoking the initialization process again, it should process
# groups 1 and 3 and create group 2.
cont_manager = ns_group_manager.NSGroupManager(size)
self.assertEqual({1: NSG_IDS[0],
2: NSG_IDS[3],
3: NSG_IDS[2]},
@@ -309,7 +309,7 @@ class TestNSGroupManager(nsxlib_testcase.NsxLibTestCase):
@_mock_create_and_list_nsgroups
def test_suggest_nested_group(self):
size = 5
cont_manager = ns_group_manager.NSGroupManager(size)
cont_manager = ns_group_manager.NSGroupManager(self.nsxlib, size)
# We expect that the first suggested index is 2
expected_suggested_groups = NSG_IDS[2:5] + NSG_IDS[:2]
suggest_group = lambda: cont_manager._suggest_nested_group('fake-id')
@@ -52,6 +52,9 @@ from vmware_nsx.tests.unit.nsxlib.v3 import nsxlib_testcase


PLUGIN_NAME = 'vmware_nsx.plugin.NsxV3Plugin'
NSX_TZ_NAME = 'default transport zone'
NSX_DHCP_PROFILE_ID = 'default dhcp profile'
NSX_METADATA_PROXY_ID = 'default metadata proxy'


def _mock_create_firewall_rules(*args):
@@ -135,6 +138,17 @@ def _mock_nsx_backend_calls():
class NsxV3PluginTestCaseMixin(test_plugin.NeutronDbPluginV2TestCase,
nsxlib_testcase.NsxClientTestCase):

def setup_conf_overrides(self):
cfg.CONF.set_override('default_overlay_tz', NSX_TZ_NAME, 'nsx_v3')
cfg.CONF.set_override('native_dhcp_metadata', False, 'nsx_v3')
cfg.CONF.set_override('dhcp_profile_uuid',
NSX_DHCP_PROFILE_ID, 'nsx_v3')
cfg.CONF.set_override('metadata_proxy_uuid',
NSX_METADATA_PROXY_ID, 'nsx_v3')
cfg.CONF.set_override(
'network_scheduler_driver',
'neutron.scheduler.dhcp_agent_scheduler.AZAwareWeightScheduler')

def setUp(self, plugin=PLUGIN_NAME,
ext_mgr=None,
service_plugins=None):
@@ -142,7 +156,7 @@ class NsxV3PluginTestCaseMixin(test_plugin.NeutronDbPluginV2TestCase,
self._patchers = []

_mock_nsx_backend_calls()
nsxlib_testcase.NsxClientTestCase.setup_conf_overrides()
self.setup_conf_overrides()

super(NsxV3PluginTestCaseMixin, self).setUp(plugin=plugin,
ext_mgr=ext_mgr)
@@ -81,23 +81,6 @@ def make_fake_metadata_proxy():
"edge_cluster_member_indexes": [0, 1]}


def get_resource(resource):
return {'id': resource.split('/')[-1]}


def create_resource(resource, data):
data['id'] = uuidutils.generate_uuid()
return data


def update_resource(resource, data):
return resource


def delete_resource(resource):
pass


class MockRequestsResponse(object):
def __init__(self, status_code, content=None):
self.status_code = status_code
@@ -17,28 +17,23 @@ import copy
import mock
import unittest

from oslo_config import cfg
from oslo_utils import uuidutils
from requests import exceptions as requests_exceptions

from vmware_nsx.nsxlib import v3 as nsxlib
from vmware_nsx.nsxlib.v3 import client as nsx_client
from vmware_nsx.nsxlib.v3 import cluster as nsx_cluster
from vmware_nsx.plugins.nsx_v3 import utils as v3_utils

NSX_USER = 'admin'
NSX_PASSWORD = 'default'
NSX_MANAGER = '1.2.3.4'
NSX_INSECURE = False
NSX_CERT = '/opt/stack/certs/nsx.pem'
NSX_HTTP_RETRIES = 10
NSX_HTTP_TIMEOUT = 10
NSX_HTTP_READ_TIMEOUT = 180
NSX_TZ_NAME = 'default transport zone'
NSX_DHCP_PROFILE_ID = 'default dhcp profile'
NSX_METADATA_PROXY_ID = 'default metadata proxy'

V3_CLIENT_PKG = 'vmware_nsx.nsxlib.v3.client'
BRIDGE_FNS = ['create_resource', 'delete_resource',
'update_resource', 'get_resource']
NSX_CONCURENT_CONN = 10
NSX_CONN_IDLE_TIME = 10


def _mock_nsxlib():
@@ -91,43 +86,11 @@ def _mock_nsxlib():

class NsxLibTestCase(unittest.TestCase):

@classmethod
def setup_conf_overrides(cls):
cfg.CONF.set_override('default_overlay_tz', NSX_TZ_NAME, 'nsx_v3')
cfg.CONF.set_override('native_dhcp_metadata', False, 'nsx_v3')
cfg.CONF.set_override('dhcp_profile_uuid',
NSX_DHCP_PROFILE_ID, 'nsx_v3')
cfg.CONF.set_override('metadata_proxy_uuid',
NSX_METADATA_PROXY_ID, 'nsx_v3')
cfg.CONF.set_override('nsx_api_user', NSX_USER, 'nsx_v3')
cfg.CONF.set_override('nsx_api_password', NSX_PASSWORD, 'nsx_v3')
cfg.CONF.set_override('nsx_api_managers', [NSX_MANAGER], 'nsx_v3')
cfg.CONF.set_override('insecure', NSX_INSECURE, 'nsx_v3')
cfg.CONF.set_override('ca_file', NSX_CERT, 'nsx_v3')
cfg.CONF.set_override('http_timeout', NSX_HTTP_TIMEOUT, 'nsx_v3')
cfg.CONF.set_override('http_read_timeout',
NSX_HTTP_READ_TIMEOUT, 'nsx_v3')
cfg.CONF.set_override(
'network_scheduler_driver',
'neutron.scheduler.dhcp_agent_scheduler.AZAwareWeightScheduler')

def setUp(self, *args, **kwargs):
super(NsxLibTestCase, self).setUp()
NsxClientTestCase.setup_conf_overrides()
_mock_nsxlib()

self.nsxlib = nsxlib.NsxLib(
username=cfg.CONF.nsx_v3.nsx_api_user,
password=cfg.CONF.nsx_v3.nsx_api_password,
retries=cfg.CONF.nsx_v3.http_retries,
insecure=cfg.CONF.nsx_v3.insecure,
ca_file=cfg.CONF.nsx_v3.ca_file,
concurrent_connections=cfg.CONF.nsx_v3.concurrent_connections,
http_timeout=cfg.CONF.nsx_v3.http_timeout,
http_read_timeout=cfg.CONF.nsx_v3.http_read_timeout,
conn_idle_timeout=cfg.CONF.nsx_v3.conn_idle_timeout,
http_provider=None,
max_attempts=cfg.CONF.nsx_v3.retries)
self.nsxlib = v3_utils.get_nsxlib_wrapper()

# print diffs when assert comparisons fail
self.maxDiff = None
@@ -158,42 +121,34 @@ class MemoryMockAPIProvider(nsx_cluster.AbstractHTTPProvider):

class NsxClientTestCase(NsxLibTestCase):

class MockBridge(object):
"""The MockBridge class is used to mock the nsxlib/v3/client.py file,
and includes the relevant functions & classes APIs
"""

def __init__(self, api_client):
self._client = api_client

def get_resource(self, resource):
return nsx_client.get_resource(
resource, client=self._client)

def create_resource(self, resource, data):
return nsx_client.create_resource(
resource, data, client=self._client)

def delete_resource(self, resource):
return nsx_client.delete_resource(
resource, client=self._client)

def update_resource(self, resource, data):
return nsx_client.update_resource(
resource, data, client=self._client)

def NSX3Client(self, cluster_api):
return self._client

def _set_default_api_cluster(self, cluster_api):
pass

class MockNSXClusteredAPI(nsx_cluster.NSXClusteredAPI):

def __init__(self, session_response=None):
def __init__(
self, session_response=None,
username=None,
password=None,
retries=None,
insecure=None,
ca_file=None,
concurrent_connections=None,
http_timeout=None,
http_read_timeout=None,
conn_idle_timeout=None,
nsx_api_managers=None):
super(NsxClientTestCase.MockNSXClusteredAPI, self).__init__(
username=username or NSX_USER,
password=password or NSX_PASSWORD,
retries=retries or NSX_HTTP_RETRIES,
insecure=insecure if insecure is not None else NSX_INSECURE,
ca_file=ca_file or NSX_CERT,
concurrent_connections=(concurrent_connections or
NSX_CONCURENT_CONN),
http_timeout=http_timeout or NSX_HTTP_TIMEOUT,
http_read_timeout=http_read_timeout or NSX_HTTP_READ_TIMEOUT,
conn_idle_timeout=conn_idle_timeout or NSX_CONN_IDLE_TIME,
http_provider=NsxClientTestCase.MockHTTPProvider(
session_response=session_response))
session_response=session_response),
nsx_api_managers=nsx_api_managers or [NSX_MANAGER])
self._record = mock.Mock()

def record_call(self, request, **kwargs):
@@ -284,9 +239,9 @@ class NsxClientTestCase(NsxLibTestCase):
def validate_connection(self, cluster_api, endpoint, conn):
assert conn is not None

def mock_nsx_clustered_api(self, session_response=None):
def mock_nsx_clustered_api(self, session_response=None, **kwargs):
return NsxClientTestCase.MockNSXClusteredAPI(
session_response=session_response)
session_response=session_response, **kwargs)

def mocked_resource(self, resource_class, mock_validate=True,
session_response=None):
@@ -318,17 +273,22 @@ class NsxClientTestCase(NsxLibTestCase):

return client

def mocked_rest_fns(self, module, attr, mock_validate=True,
mock_cluster=None, client=None):
if client is None:
client = nsx_client.NSX3Client(
mock_cluster or self.mock_nsx_clustered_api())
mocked_fns = NsxClientTestCase.MockBridge(client)
mocked_fns.JSONRESTClient = nsx_client.JSONRESTClient
def new_mocked_cluster(self, conf_managers, validate_conn_func,
concurrent_connections=None):
mock_provider = mock.Mock()
mock_provider.default_scheme = 'https'
mock_provider.validate_connection = validate_conn_func

if mock_validate:
mock.patch.object(client, '_validate_result').start()

mock.patch.object(module, attr, new=mocked_fns).start()

return mocked_fns
return nsx_cluster.NSXClusteredAPI(
username=NSX_USER,
password=NSX_PASSWORD,
retries=NSX_HTTP_RETRIES,
insecure=NSX_INSECURE,
ca_file=NSX_CERT,
concurrent_connections=(concurrent_connections or
NSX_CONCURENT_CONN),
http_timeout=NSX_HTTP_TIMEOUT,
http_read_timeout=NSX_HTTP_READ_TIMEOUT,
conn_idle_timeout=NSX_CONN_IDLE_TIME,
http_provider=mock_provider,
nsx_api_managers=conf_managers)
@@ -275,7 +275,7 @@ class NsxV3APIClientBridgeTestCase(nsxlib_testcase.NsxClientTestCase):

def test_get_resource(self):
api = self.new_mocked_client(client.NSX3Client)
client.get_resource('ports', client=api)
api.get('ports')

assert_json_call(
'get', api,
@@ -283,9 +283,7 @@ class NsxV3APIClientBridgeTestCase(nsxlib_testcase.NsxClientTestCase):

def test_create_resource(self):
api = self.new_mocked_client(client.NSX3Client)
client.create_resource(
'ports', {'resource-name': 'port1'},
client=api)
api.create('ports', {'resource-name': 'port1'})

assert_json_call(
'post', api,
@@ -294,8 +292,7 @@ class NsxV3APIClientBridgeTestCase(nsxlib_testcase.NsxClientTestCase):

def test_update_resource(self):
api = self.new_mocked_client(client.NSX3Client)
client.update_resource(
'ports/1', {'name': 'a-new-name'}, client=api)
api.update('ports/1', {'name': 'a-new-name'})

assert_json_call(
'put', api,
@@ -304,7 +301,7 @@ class NsxV3APIClientBridgeTestCase(nsxlib_testcase.NsxClientTestCase):

def test_delete_resource(self):
api = self.new_mocked_client(client.NSX3Client)
client.delete_resource('ports/11', client=api)
api.delete('ports/11')

assert_json_call(
'delete', api,
@@ -17,7 +17,6 @@ import mock
import six.moves.urllib.parse as urlparse
import unittest

from oslo_config import cfg
from oslo_serialization import jsonutils
from requests import exceptions as requests_exceptions
from vmware_nsx.nsxlib.v3 import client
@@ -89,64 +88,35 @@ class NsxV3ClusteredAPITestCase(nsxlib_testcase.NsxClientTestCase):

def test_conf_providers_no_scheme(self):
conf_managers = ['8.9.10.11', '9.10.11.12:4433']
cfg.CONF.set_override(
'nsx_api_managers', conf_managers, 'nsx_v3')

mock_provider = mock.Mock()
mock_provider.default_scheme = 'https'
mock_provider.validate_connection = _validate_conn_up

api = cluster.NSXClusteredAPI(http_provider=mock_provider)
api = self.new_mocked_cluster(conf_managers, _validate_conn_up)

self._assert_providers(
api, [(p, "https://%s" % p) for p in conf_managers])

def test_conf_providers_with_scheme(self):
conf_managers = ['http://8.9.10.11:8080', 'https://9.10.11.12:4433']
cfg.CONF.set_override(
'nsx_api_managers', conf_managers, 'nsx_v3')

mock_provider = mock.Mock()
mock_provider.default_scheme = 'https'
mock_provider.validate_connection = _validate_conn_up

api = cluster.NSXClusteredAPI(http_provider=mock_provider)
api = self.new_mocked_cluster(conf_managers, _validate_conn_up)

self._assert_providers(
api, [(urlparse.urlparse(p).netloc, p) for p in conf_managers])

def test_http_retries(self):
cfg.CONF.set_override(
'http_retries', 9, 'nsx_v3')

api = self.mock_nsx_clustered_api()
api = self.mock_nsx_clustered_api(retries=9)
with api.endpoints['1.2.3.4'].pool.item() as session:
self.assertEqual(
session.adapters['https://'].max_retries.total, 9)

def test_conns_per_pool(self):
cfg.CONF.set_override(
'concurrent_connections', 11, 'nsx_v3')
conf_managers = ['8.9.10.11', '9.10.11.12:4433']
cfg.CONF.set_override(
'nsx_api_managers', conf_managers, 'nsx_v3')

mock_provider = mock.Mock()
mock_provider.default_scheme = 'https'
mock_provider.validate_connection = _validate_conn_up

api = cluster.NSXClusteredAPI(http_provider=mock_provider)
api = self.new_mocked_cluster(
conf_managers, _validate_conn_up,
concurrent_connections=11)

for ep_id, ep in api.endpoints.items():
self.assertEqual(ep.pool.max_size, 11)

def test_timeouts(self):
cfg.CONF.set_override(
'http_read_timeout', 37, 'nsx_v3')
cfg.CONF.set_override(
'http_timeout', 7, 'nsx_v3')

api = self.mock_nsx_clustered_api()
api = self.mock_nsx_clustered_api(http_read_timeout=37, http_timeout=7)
api.get('logical-ports')
mock_call = api.recorded_calls.method_calls[0]
name, args, kwargs = mock_call
@@ -157,14 +127,8 @@ class ClusteredAPITestCase(nsxlib_testcase.NsxClientTestCase):

def _test_health(self, validate_fn, expected_health):
conf_managers = ['8.9.10.11', '9.10.11.12']
cfg.CONF.set_override(
'nsx_api_managers', conf_managers, 'nsx_v3')
api = self.new_mocked_cluster(conf_managers, validate_fn)

mock_provider = mock.Mock()
mock_provider.default_scheme = 'https'

mock_provider.validate_connection = validate_fn
api = cluster.NSXClusteredAPI(http_provider=mock_provider)
self.assertEqual(api.health, expected_health)

def test_orange_health(self):
@@ -183,14 +147,7 @@ class ClusteredAPITestCase(nsxlib_testcase.NsxClientTestCase):

def test_cluster_validate_with_exception(self):
conf_managers = ['8.9.10.11', '9.10.11.12', '10.11.12.13']
cfg.CONF.set_override(
'nsx_api_managers', conf_managers, 'nsx_v3')

mock_provider = mock.Mock()
mock_provider.default_scheme = 'https'

mock_provider.validate_connection = _validate_conn_down
api = cluster.NSXClusteredAPI(http_provider=mock_provider)
api = self.new_mocked_cluster(conf_managers, _validate_conn_down)

self.assertEqual(len(api.endpoints), 3)
self.assertRaises(nsxlib_exc.ServiceClusterUnavailable,
@@ -218,10 +175,7 @@ class ClusteredAPITestCase(nsxlib_testcase.NsxClientTestCase):

def test_cluster_round_robin_servicing(self):
conf_managers = ['8.9.10.11', '9.10.11.12', '10.11.12.13']
cfg.CONF.set_override(
'nsx_api_managers', conf_managers, 'nsx_v3')

api = self.mock_nsx_clustered_api()
api = self.mock_nsx_clustered_api(nsx_api_managers=conf_managers)
api._validate = mock.Mock()

eps = list(api._endpoints.values())