Merge "Plugin housekeeper"

Zuul 2017-12-21 16:41:30 +00:00 committed by Gerrit Code Review
commit 30885efb36
12 changed files with 1077 additions and 3 deletions

View File

@ -0,0 +1,62 @@
Plugin Housekeeper
==================
During the Neutron plugin's operation, the system may enter an inconsistent state
due to synchronization issues between different components, e.g. Neutron and NSX,
or NSX and vCenter.
Some of these inconsistencies may impact the operation of various system
elements.
The Housekeeping mechanism should:
a) Detect such inconsistencies and warn about them.
b) Resolve inconsistencies when possible.
Some of these inconsistencies can also be resolved using the Admin utility, but
that requires manual intervention by the administrator, whereas the housekeeping
mechanism is automatic.
Configuration
-------------
The housekeeping mechanism uses two configuration parameters:
nsxv.housekeeping_jobs: Selects which housekeeping jobs the housekeeper executes;
jobs that are not listed here are skipped.
nsxv.housekeeping_readonly: When set to True (the default), the housekeeper only
warns about detected inconsistencies. When set to False, it also attempts to fix
them.
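For example, assuming the NSX-V options are read from the [nsxv] section of the
plugin configuration file (e.g. nsx.ini), a non-readonly setup that runs only the
DHCP edge job could look like::

    [nsxv]
    # Only the DHCP edge recovery job is enabled
    housekeeping_jobs = error_dhcp_edge
    # Allow the housekeeper to fix inconsistencies, not just warn about them
    housekeeping_readonly = False
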
Operation
---------
The housekeeping mechanism is implemented as a Neutron API extension. Therefore,
it can be triggered by accessing the extension's URL with an administrator
context.
A naive devstack example could be::

    source devstack/openrc admin demo
    export AUTH_TOKEN=`openstack token issue | awk '/ id /{print $4}'`
    curl -X PUT -s -H "X-Auth-Token: $AUTH_TOKEN" -H 'Content-Type: application/json' -d '{"housekeeper": {}}' http://<IP address>:9696/v2.0/housekeepers/all

Where <IP address> is the Neutron controller's IP address or the virtual IP of
the load balancer that manages the Neutron controllers.
It is important to use the virtual IP in the case of load-balanced active-backup
Neutron servers, as otherwise the housekeeping request may be handled by the
wrong controller.
The housekeeper is intended to run periodically, so it should be scheduled via a
timing mechanism such as Linux cron, as sketched below.
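For example, a cron entry along the following lines could trigger the housekeeper
every hour. This is only a sketch: the wrapper script name and its contents
(sourcing admin credentials and issuing the curl request shown above) are
deployment-specific assumptions, not something the plugin ships::

    # Hypothetical /etc/cron.d/neutron-housekeeper entry: run the housekeeper hourly
    0 * * * * root /usr/local/bin/neutron-housekeeper.sh
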
Plugin Jobs
-----------
NSX-v
~~~~~
error_dhcp_edge: scans for DHCP Edge appliances that are in the ERROR state.
When in non-readonly mode, the job attempts to recover these DHCP edges by
removing stale elements from the Neutron DB and reconfiguring the interfaces at
the backend when required.
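A single job can also be triggered on its own by addressing it by name instead of
"all", e.g. (same environment and token as in the example above)::

    curl -X PUT -s -H "X-Auth-Token: $AUTH_TOKEN" -H 'Content-Type: application/json' -d '{"housekeeper": {}}' http://<IP address>:9696/v2.0/housekeepers/error_dhcp_edge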

View File

@ -74,6 +74,8 @@ openstack.nsxclient.v2 =
project_plugin_create = vmware_nsx.osc.v2.project_plugin_map:CreateProjectPluginMap
project_plugin_show = vmware_nsx.osc.v2.project_plugin_map:ShowProjectPluginMap
project_plugin_list = vmware_nsx.osc.v2.project_plugin_map:ListProjectPluginMap
vmware_nsx.neutron.nsxv.housekeeper.jobs =
error_dhcp_edge = vmware_nsx.plugins.nsx_v.housekeeper.error_dhcp_edge:ErrorDhcpEdgeJob
[build_sphinx]
source-dir = doc/source

View File

@ -705,6 +705,12 @@ nsxv_opts = [
default=True,
help=_("If False, different tenants will not use the same "
"DHCP edge or router edge.")),
cfg.ListOpt('housekeeping_jobs',
default=['error_dhcp_edge'],
help=_("List of the enabled housekeeping jobs")),
cfg.BoolOpt('housekeeping_readonly',
default=True,
help=_("Housekeeping will only warn about breakage.")),
]
# define the configuration of each NSX-V availability zone.

View File

@ -0,0 +1,106 @@
# Copyright 2017 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from neutron.api.v2 import resource_helper
from neutron_lib.api import extensions
from neutron_lib import exceptions as nexception
from vmware_nsx._i18n import _
HOUSEKEEPER_RESOURCE_NAME = "housekeeper"
HOUSEKEEPERS = "housekeepers"
# The housekeeper tasks table is read only
RESOURCE_ATTRIBUTE_MAP = {
HOUSEKEEPERS: {
'name': {
'allow_post': False, 'allow_put': False, 'is_visible': True},
'description': {
'allow_post': False, 'allow_put': False, 'is_visible': True},
'enabled': {
'allow_post': False, 'allow_put': False, 'is_visible': True},
}
}
class Housekeeper(extensions.ExtensionDescriptor):
"""API extension for NSX housekeeper jobs."""
@classmethod
def get_name(cls):
return "Housekeeper"
@classmethod
def get_alias(cls):
return HOUSEKEEPER_RESOURCE_NAME
@classmethod
def get_description(cls):
return "NSX plugin housekeeping services."
@classmethod
def get_updated(cls):
return "2016-11-20T00:00:00-00:00"
@classmethod
def get_resources(cls):
"""Returns Ext Resources."""
plural_mappings = resource_helper.build_plural_mappings(
{}, RESOURCE_ATTRIBUTE_MAP)
member_actions = {}
return resource_helper.build_resource_info(plural_mappings,
RESOURCE_ATTRIBUTE_MAP,
None,
action_map=member_actions,
register_quota=True,
translate_name=True)
def get_extended_resources(self, version):
if version == "2.0":
return RESOURCE_ATTRIBUTE_MAP
else:
return {}
class HousekeeperReadOnly(nexception.NotAuthorized):
message = _("NSX housekeeper tasks are read-only.")
class HousekeeperPluginBase(object):
@abc.abstractmethod
def create_housekeeper(self, context, housekeeper):
raise HousekeeperReadOnly()
@abc.abstractmethod
def update_housekeeper(self, context, name, housekeeper):
pass
@abc.abstractmethod
def get_housekeeper(self, context, name, fields=None):
pass
@abc.abstractmethod
def delete_housekeeper(self, context, name):
raise HousekeeperReadOnly()
@abc.abstractmethod
def get_housekeepers(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
pass

View File

@ -0,0 +1,41 @@
# Copyright 2017 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from neutron_lib.plugins import directory
from oslo_log import log
import six
LOG = log.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class BaseJob(object):
def __init__(self, readonly):
self.readonly = readonly
self.plugin = directory.get_plugin()
@abc.abstractmethod
def get_name(self):
pass
@abc.abstractmethod
def get_description(self):
pass
@abc.abstractmethod
def run(self, context):
pass

View File

@ -0,0 +1,84 @@
# Copyright 2017 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log
import stevedore
from neutron_lib import exceptions as n_exc
from vmware_nsx.common import locking
LOG = log.getLogger(__name__)
ALL_DUMMY_JOB = {
'name': 'all',
'description': 'Execute all housekeepers',
'enabled': True}
class NsxvHousekeeper(stevedore.named.NamedExtensionManager):
def __init__(self, hk_ns, hk_jobs):
self.readonly = cfg.CONF.nsxv.housekeeping_readonly
if self.readonly:
LOG.info('Housekeeper initialized in readonly mode')
else:
LOG.info('Housekeeper initialized')
self.jobs = {}
super(NsxvHousekeeper, self).__init__(
hk_ns, hk_jobs, invoke_on_load=True, invoke_args=(self.readonly,))
LOG.info("Loaded housekeeping job names: %s", self.names())
for job in self:
if job.obj.get_name() in cfg.CONF.nsxv.housekeeping_jobs:
self.jobs[job.obj.get_name()] = job.obj
def get(self, job_name):
if job_name == ALL_DUMMY_JOB.get('name'):
return ALL_DUMMY_JOB
for job in self:
name = job.obj.get_name()
if job_name == name:
return {'name': job_name,
'description': job.obj.get_description(),
'enabled': job_name in self.jobs}
raise n_exc.ObjectNotFound(id=job_name)
def list(self):
results = [ALL_DUMMY_JOB]
for job in self:
job_name = job.obj.get_name()
results.append({'name': job_name,
'description': job.obj.get_description(),
'enabled': job_name in self.jobs})
return results
def run(self, context, job_name):
if context.is_admin:
with locking.LockManager.get_lock('nsx-housekeeper'):
if job_name == ALL_DUMMY_JOB.get('name'):
for job in self.jobs.values():
job.run(context)
else:
job = self.jobs.get(job_name)
if job:
job.run(context)
else:
raise n_exc.ObjectNotFound(id=job_name)
else:
raise n_exc.AdminRequired()

View File

@ -0,0 +1,267 @@
# Copyright 2017 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib import constants
from oslo_log import log
from oslo_utils import uuidutils
from vmware_nsx.common import locking
from vmware_nsx.db import nsxv_db
from vmware_nsx.plugins.common.housekeeper import base_job
from vmware_nsx.plugins.nsx_v.vshield.common import constants as vcns_const
LOG = log.getLogger(__name__)
class ErrorDhcpEdgeJob(base_job.BaseJob):
def get_name(self):
return 'error_dhcp_edge'
def get_description(self):
return 'revalidate DHCP Edge appliances in ERROR state'
def run(self, context):
super(ErrorDhcpEdgeJob, self).run(context)
# Gather ERROR state DHCP edges into dict
filters = {'status': [constants.ERROR]}
error_edge_bindings = nsxv_db.get_nsxv_router_bindings(
context.session, filters=filters)
if not error_edge_bindings:
LOG.debug('Housekeeping: no DHCP edges in ERROR state detected')
return
with locking.LockManager.get_lock('nsx-dhcp-edge-pool'):
edge_dict = {}
for binding in error_edge_bindings:
if binding['router_id'].startswith(
vcns_const.DHCP_EDGE_PREFIX):
bind_list = edge_dict.get(binding['edge_id'],
[])
bind_list.append(binding)
edge_dict[binding['edge_id']] = bind_list
# Get valid neutron networks and create a prefix dict.
networks = [net['id'] for net in
self.plugin.get_networks(context, fields=['id'])]
pfx_dict = {net[:36 - len(vcns_const.DHCP_EDGE_PREFIX)]: net
for net in networks}
for edge_id in edge_dict.keys():
try:
self._validate_dhcp_edge(
context, edge_dict, pfx_dict, networks, edge_id)
except Exception as e:
LOG.error('Failed to recover DHCP Edge %s (%s)',
edge_id, e)
def _validate_dhcp_edge(
self, context, edge_dict, pfx_dict, networks, edge_id):
# Also metadata network should be a valid network for the edge
az_name = self.plugin.get_availability_zone_name_by_edge(context,
edge_id)
with locking.LockManager.get_lock(edge_id):
vnic_binds = nsxv_db.get_edge_vnic_bindings_by_edge(
context.session, edge_id)
edge_networks = [bind['network_id'] for bind in vnic_binds]
# Step (A)
# Find router bindings which are mapped to dead networks, or
# do not have interfaces registered in nsxv tables
for binding in edge_dict[edge_id]:
router_id = binding['router_id']
net_pfx = router_id[len(vcns_const.DHCP_EDGE_PREFIX):]
net_id = pfx_dict.get(net_pfx)
if net_id is None:
# Delete router binding as we do not have such network
# in Neutron
LOG.warning('Housekeeping: router binding %s for edge '
'%s has no matching neutron network',
router_id, edge_id)
if not self.readonly:
nsxv_db.delete_nsxv_router_binding(
context.session, binding['router_id'])
else:
if net_id not in edge_networks:
# Create vNic bind here
LOG.warning('Housekeeping: edge %s vnic binding '
'missing for network %s', edge_id,
net_id)
if not self.readonly:
nsxv_db.allocate_edge_vnic_with_tunnel_index(
context.session, edge_id, net_id, az_name)
# Step (B)
# Find vNic bindings which reference invalid networks or aren't
# bound to any router binding
            # Reread the vNic bindings, as we might have created or deleted
            # some in step (A)
vnic_binds = nsxv_db.get_edge_vnic_bindings_by_edge(
context.session, edge_id)
for bind in vnic_binds:
if bind['network_id'] not in networks:
LOG.warning('Housekeeping: edge vnic binding for edge '
'%s is for invalid network id %s',
edge_id, bind['network_id'])
if not self.readonly:
nsxv_db.free_edge_vnic_by_network(
context.session, edge_id, bind['network_id'])
# Step (C)
# Verify that backend is in sync with Neutron
            # Reread the vNic bindings, as we might have deleted some in step (B)
vnic_binds = nsxv_db.get_edge_vnic_bindings_by_edge(
context.session, edge_id)
# Transform to network-keyed dict
vnic_dict = {vnic['network_id']: {
'vnic_index': vnic['vnic_index'],
'tunnel_index': vnic['tunnel_index']
} for vnic in vnic_binds}
backend_vnics = self.plugin.nsx_v.vcns.get_interfaces(
edge_id)[1].get('vnics', [])
if_changed = {}
self._validate_edge_subinterfaces(
context, edge_id, backend_vnics, vnic_dict, if_changed)
self._add_missing_subinterfaces(
context, edge_id, vnic_binds, backend_vnics, if_changed)
if not self.readonly:
for vnic in backend_vnics:
if if_changed[vnic['index']]:
self.plugin.nsx_v.vcns.update_interface(edge_id,
vnic)
self._update_router_bindings(context, edge_id)
def _validate_edge_subinterfaces(self, context, edge_id, backend_vnics,
vnic_dict, if_changed):
# Validate that all the interfaces on the Edge
# appliance are registered in nsxv_edge_vnic_bindings
for vnic in backend_vnics:
if_changed[vnic['index']] = False
if (vnic['isConnected'] and vnic['type'] == 'trunk'
and vnic['subInterfaces']):
for sub_if in vnic['subInterfaces']['subInterfaces']:
# Subinterface name field contains the net id
vnic_bind = vnic_dict.get(sub_if['logicalSwitchName'])
if (vnic_bind
and vnic_bind['vnic_index'] == vnic['index']
and vnic_bind['tunnel_index'] == sub_if['tunnelId']):
pass
else:
LOG.warning('Housekeeping: subinterface %s for vnic '
'%s on edge %s is not defined in '
'nsxv_edge_vnic_bindings',
sub_if['tunnelId'],
vnic['index'], edge_id)
if_changed[vnic['index']] = True
vnic['subInterfaces']['subInterfaces'].remove(sub_if)
def _add_missing_subinterfaces(self, context, edge_id, vnic_binds,
backend_vnics, if_changed):
# Verify that all the entries in
# nsxv_edge_vnic_bindings are attached on the Edge
# Arrange the vnic binds in a list of lists - vnics and subinterfaces
metadata_nets = [
net['network_id'] for net in
nsxv_db.get_nsxv_internal_networks(
context.session,
vcns_const.InternalEdgePurposes.INTER_EDGE_PURPOSE)]
for vnic_bind in vnic_binds:
if vnic_bind['network_id'] in metadata_nets:
continue
for vnic in backend_vnics:
if vnic['index'] == vnic_bind['vnic_index']:
found = False
tunnel_index = vnic_bind['tunnel_index']
network_id = vnic_bind['network_id']
for sub_if in (vnic.get('subInterfaces', {}).get(
'subInterfaces', [])):
if sub_if['tunnelId'] == tunnel_index:
found = True
if sub_if.get('logicalSwitchName') != network_id:
LOG.warning('Housekeeping: subinterface %s on '
'vnic %s on edge %s should be '
'connected to network %s',
tunnel_index, vnic['index'],
edge_id, network_id)
if_changed[vnic['index']] = True
if not self.readonly:
self._recreate_vnic_subinterface(
context, network_id, edge_id, vnic,
tunnel_index)
sub_if['name'] = network_id
if not found:
LOG.warning('Housekeeping: subinterface %s on vnic '
'%s on edge %s should be connected to '
'network %s but is missing', tunnel_index,
vnic['index'], edge_id, network_id)
if_changed[vnic['index']] = True
if not self.readonly:
self._recreate_vnic_subinterface(
context, network_id, edge_id, vnic,
tunnel_index)
def _recreate_vnic_subinterface(
self, context, network_id, edge_id, vnic, tunnel_index):
vnic_index = vnic['index']
network_name_item = [edge_id, str(vnic_index), str(tunnel_index)]
network_name = ('-'.join(network_name_item) +
uuidutils.generate_uuid())[:36]
port_group_id = vnic.get('portgroupId')
address_groups = self.plugin._create_network_dhcp_address_group(
context, network_id)
port_group_id, iface = self.plugin.edge_manager._create_sub_interface(
context, network_id, network_name, tunnel_index,
address_groups, port_group_id)
if not vnic.get('subInterfaces'):
vnic['subInterfaces'] = {'subInterfaces': []}
vnic['subInterfaces']['subInterfaces'].append(iface)
if vnic['type'] != 'trunk':
# reinitialize the interface as it is missing config
vnic['name'] = (vcns_const.INTERNAL_VNIC_NAME +
str(vnic['index']))
vnic['type'] = 'trunk'
vnic['portgroupId'] = port_group_id
vnic['mtu'] = 1500
vnic['enableProxyArp'] = False
vnic['enableSendRedirects'] = True
vnic['isConnected'] = True
def _update_router_bindings(self, context, edge_id):
edge_router_binds = nsxv_db.get_nsxv_router_bindings_by_edge(
context.session, edge_id)
for b in edge_router_binds:
nsxv_db.update_nsxv_router_binding(
context.session, b['router_id'], status='ACTIVE')

View File

@ -118,6 +118,7 @@ from vmware_nsx.extensions import (
vnicindex as ext_vnic_idx)
from vmware_nsx.extensions import dhcp_mtu as ext_dhcp_mtu
from vmware_nsx.extensions import dns_search_domain as ext_dns_search_domain
from vmware_nsx.extensions import housekeeper as hk_ext
from vmware_nsx.extensions import maclearning as mac_ext
from vmware_nsx.extensions import nsxpolicy
from vmware_nsx.extensions import projectpluginmap
@ -126,6 +127,7 @@ from vmware_nsx.extensions import routersize
from vmware_nsx.extensions import secgroup_rule_local_ip_prefix
from vmware_nsx.extensions import securitygrouplogging as sg_logging
from vmware_nsx.extensions import securitygrouppolicy as sg_policy
from vmware_nsx.plugins.common.housekeeper import housekeeper
from vmware_nsx.plugins.common import plugin as nsx_plugin_common
from vmware_nsx.plugins.nsx import utils as tvd_utils
from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az
@ -172,7 +174,8 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
dns_db.DNSDbMixin, nsxpolicy.NsxPolicyPluginBase,
vlantransparent_db.Vlantransparent_db_mixin,
nsx_com_az.NSXAvailabilityZonesPluginCommon,
mac_db.MacLearningDbMixin):
mac_db.MacLearningDbMixin,
hk_ext.Housekeeper):
supported_extension_aliases = ["agent",
"allowed-address-pairs",
@ -203,7 +206,8 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
"l3-flavors",
"flavors",
"dhcp-mtu",
"mac-learning"]
"mac-learning",
"housekeeper"]
__native_bulk_support = True
__native_pagination_support = True
@ -220,6 +224,7 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
floatingip=l3_db_models.FloatingIP)
def __init__(self):
self._is_sub_plugin = tvd_utils.is_tvd_core_plugin()
self.housekeeper = None
super(NsxVPluginV2, self).__init__()
if self._is_sub_plugin:
extension_drivers = cfg.CONF.nsx_tvd.nsx_v_extension_drivers
@ -348,6 +353,10 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
nsx_v_md_proxy.NsxVMetadataProxyHandler(
self, az))
self.housekeeper = housekeeper.NsxvHousekeeper(
hk_ns='vmware_nsx.neutron.nsxv.housekeeper.jobs',
hk_jobs=['error_dhcp_edge'])
self.init_is_complete = True
def _validate_nsx_version(self):
@ -3274,7 +3283,7 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
net_res[az_def.AZ_HINTS] = az_validator.convert_az_string_to_list(
net_db[az_def.AZ_HINTS])
def _get_availability_zone_name_by_edge(self, context, edge_id):
def get_availability_zone_name_by_edge(self, context, edge_id):
az_name = nsxv_db.get_edge_availability_zone(
context.session, edge_id)
if az_name:
@ -4643,3 +4652,14 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
if not self._nsx_policy_is_hidden(policy):
results.append(self._nsx_policy_to_dict(policy))
return results
def get_housekeeper(self, context, name, fields=None):
return self.housekeeper.get(name)
def get_housekeepers(self, context, filters=None, fields=None, sorts=None,
limit=None, marker=None, page_reverse=False):
return self.housekeeper.list()
def update_housekeeper(self, context, name, housekeeper):
self.housekeeper.run(context, name)
return self.housekeeper.get(name)

View File

@ -0,0 +1,486 @@
# Copyright 2017 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
import mock
from neutron.tests import base
from neutron_lib import constants
from vmware_nsx.plugins.nsx_v.housekeeper import error_dhcp_edge
FAKE_ROUTER_BINDINGS = [
{
'router_id': 'dhcp-16c224dd-7c2b-4241-a447-4fc07a3', 'status': 'ERROR',
'availability_zone': 'default', 'edge_id': 'edge-752'},
{
'router_id': 'dhcp-31341032-6911-4596-8b64-afce92f', 'status': 'ERROR',
'availability_zone': 'default', 'edge_id': 'edge-752'},
{
'router_id': 'dhcp-51c97abb-8ac9-4f24-b914-cc30cf8', 'status': 'ERROR',
'availability_zone': 'default', 'edge_id': 'edge-752'},
{
'router_id': 'dhcp-5d01cea4-58f8-4a16-9be0-11012ca', 'status': 'ERROR',
'availability_zone': 'default', 'edge_id': 'edge-752'},
{
'router_id': 'dhcp-65a5335c-4c72-4721-920e-5abdc9e', 'status': 'ERROR',
'availability_zone': 'default', 'edge_id': 'edge-752'},
{
'router_id': 'dhcp-83bce421-b72c-4744-9285-a0fcc25', 'status': 'ERROR',
'availability_zone': 'default', 'edge_id': 'edge-752'},
{
'router_id': 'dhcp-9d2f5b66-c252-4681-86af-9460484', 'status': 'ERROR',
'availability_zone': 'default', 'edge_id': 'edge-752'},
{
'router_id': 'dhcp-aea44408-0448-42dd-9ae6-ed940da', 'status': 'ERROR',
'availability_zone': 'default', 'edge_id': 'edge-752'}]
BAD_ROUTER_BINDING = {
'router_id': 'dhcp-11111111-1111-1111-aaaa-aaaaaaa', 'status': 'ERROR',
'availability_zone': 'default', 'edge_id': 'edge-752'}
FAKE_EDGE_VNIC_BINDS = [
{
'network_id': '7c0b6fb5-d86c-4e5e-a2af-9ce36971764b',
'vnic_index': 1, 'edge_id': 'edge-752', 'tunnel_index': 1},
{
'network_id': '16c224dd-7c2b-4241-a447-4fc07a38dc80',
'vnic_index': 2, 'edge_id': 'edge-752', 'tunnel_index': 4},
{
'network_id': '65a5335c-4c72-4721-920e-5abdc9e09ba4',
'vnic_index': 2, 'edge_id': 'edge-752', 'tunnel_index': 6},
{
'network_id': 'aea44408-0448-42dd-9ae6-ed940dac564a',
'vnic_index': 4, 'edge_id': 'edge-752', 'tunnel_index': 10},
{
'network_id': '5d01cea4-58f8-4a16-9be0-11012cadbf55',
'vnic_index': 4, 'edge_id': 'edge-752', 'tunnel_index': 12},
{
'network_id': '51c97abb-8ac9-4f24-b914-cc30cf8e856a',
'vnic_index': 6, 'edge_id': 'edge-752', 'tunnel_index': 16},
{
'network_id': '31341032-6911-4596-8b64-afce92f46bf4',
'vnic_index': 6, 'edge_id': 'edge-752', 'tunnel_index': 18},
{
'network_id': '9d2f5b66-c252-4681-86af-946048414a1f',
'vnic_index': 8, 'edge_id': 'edge-752', 'tunnel_index': 22},
{
'network_id': '83bce421-b72c-4744-9285-a0fcc25b001a',
'vnic_index': 8, 'edge_id': 'edge-752', 'tunnel_index': 24}]
BAD_VNIC_BINDING = {
'network_id': '11111111-1111-1111-aaaa-aaaaaaabbaac',
'vnic_index': 8, 'edge_id': 'edge-752', 'tunnel_index': 21}
FAKE_INTERNAL_NETWORKS = [
{'availability_zone': u'default',
'network_id': u'7c0b6fb5-d86c-4e5e-a2af-9ce36971764b',
'network_purpose': 'inter_edge_net', 'updated_at': None,
'_rev_bumped': False,
'created_at': datetime.datetime(2017, 12, 13, 12, 28, 18)}]
FAKE_NETWORK_RESULTS = [{'id': 'e3a02b46-b9c9-4f2f-bcea-7978355a7dca'},
{'id': '031eaf4b-49b8-4003-9369-8a0dd5d7a163'},
{'id': '16c224dd-7c2b-4241-a447-4fc07a38dc80'},
{'id': '1a3b570c-c8b5-411e-8e13-d4dc0b3e56b2'},
{'id': '24b31d2c-fcec-45e5-bdcb-aa089d3713ae'},
{'id': '31341032-6911-4596-8b64-afce92f46bf4'},
{'id': '51c97abb-8ac9-4f24-b914-cc30cf8e856a'},
{'id': '5484b39b-ec6e-43f4-b900-fc1b2c49c71a'},
{'id': '54eae237-3516-4f82-b46f-f955e91c989c'},
{'id': '5a859fa0-bea0-41be-843a-9f9bf39e2509'},
{'id': '5d01cea4-58f8-4a16-9be0-11012cadbf55'},
{'id': '65a5335c-4c72-4721-920e-5abdc9e09ba4'},
{'id': '708f11d4-00d0-48ea-836f-01273cbf36cc'},
{'id': '7c0b6fb5-d86c-4e5e-a2af-9ce36971764b'},
{'id': '83bce421-b72c-4744-9285-a0fcc25b001a'},
{'id': '9d2f5b66-c252-4681-86af-946048414a1f'},
{'id': 'aea44408-0448-42dd-9ae6-ed940dac564a'},
{'id': 'b0cee4e3-266b-48d3-a651-04f1985fe4b0'},
{'id': 'be82b8c5-96a9-4e08-a965-bb09d48ec161'},
{'id': 'e69279c6-9a1e-4f7b-b421-b8b3eb92c54b'}]
BACKEND_EDGE_VNICS = {'vnics': [
{'label': 'vNic_0', 'name': 'external',
'addressGroups': {'addressGroups': []}, 'mtu': 1500, 'type': 'uplink',
'isConnected': True, 'index': 0, 'portgroupId': 'network-13',
'fenceParameters': [], 'enableProxyArp': False,
'enableSendRedirects': True},
{'label': 'vNic_1', 'name': 'internal1', 'addressGroups': {
'addressGroups': [
{'primaryAddress': '169.254.128.14',
'secondaryAddresses': {
'type': 'secondary_addresses',
'ipAddress': ['169.254.169.254']},
'subnetMask': '255.255.128.0',
'subnetPrefixLength': '17'}]}, 'mtu': 1500,
'type': 'internal', 'isConnected': True, 'index': 1,
'portgroupId': 'virtualwire-472',
'fenceParameters': [], 'enableProxyArp': False,
'enableSendRedirects': True},
{'label': 'vNic_2', 'name': 'internal2',
'addressGroups': {'addressGroups': []}, 'mtu': 1500, 'type': 'trunk',
'subInterfaces': {'subInterfaces': [
{'isConnected': True, 'label': 'vNic_10',
'name': '1639ff40-8137-4803-a29f-dcf0efc35b34', 'index': 10,
'tunnelId': 4, 'logicalSwitchId': 'virtualwire-497',
'logicalSwitchName': '16c224dd-7c2b-4241-a447-4fc07a38dc80',
'enableSendRedirects': True, 'mtu': 1500,
'addressGroups': {'addressGroups': [{
'primaryAddress': '10.24.0.2', 'subnetMask': '255.255.255.0',
'subnetPrefixLength': '24'}]}, 'virtualNetworkId': 5025,
'subInterfaceBackingType': 'NETWORK'},
{'isConnected': True, 'label': 'vNic_12',
'name': 'd1515746-a21a-442d-8347-62b36f5791d6', 'index': 12,
'tunnelId': 6, 'logicalSwitchId': 'virtualwire-499',
'logicalSwitchName': '65a5335c-4c72-4721-920e-5abdc9e09ba4',
'enableSendRedirects': True, 'mtu': 1500,
'addressGroups': {'addressGroups': [
{'primaryAddress': '10.26.0.2', 'subnetMask': '255.255.255.0',
'subnetPrefixLength': '24'}]}, 'virtualNetworkId': 5027,
'subInterfaceBackingType': 'NETWORK'}]}, 'isConnected': True,
'index': 2, 'portgroupId': 'dvportgroup-1550',
'fenceParameters': [], 'enableProxyArp': False,
'enableSendRedirects': True},
{'label': 'vNic_3', 'name': 'vnic3',
'addressGroups': {'addressGroups': []},
'mtu': 1500, 'type': 'internal', 'isConnected': False, 'index': 3,
'fenceParameters': [], 'enableProxyArp': False,
'enableSendRedirects': True},
{'label': 'vNic_4', 'name': 'internal4',
'addressGroups': {'addressGroups': []}, 'mtu': 1500, 'type': 'trunk',
'subInterfaces': {'subInterfaces': [
{'isConnected': True, 'label': 'vNic_16',
'name': 'e2405dc6-21d7-4421-a70c-3eecf675b286', 'index': 16,
'tunnelId': 10, 'logicalSwitchId': 'virtualwire-503',
'logicalSwitchName': 'aea44408-0448-42dd-9ae6-ed940dac564a',
'enableSendRedirects': True, 'mtu': 1500,
'addressGroups': {'addressGroups': [
{'primaryAddress': '10.30.0.2', 'subnetMask': '255.255.255.0',
'subnetPrefixLength': '24'}]}, 'virtualNetworkId': 5031,
'subInterfaceBackingType': 'NETWORK'},
{'isConnected': True, 'label': 'vNic_18',
'name': 'a10fb348-30e4-477f-817f-bb3c9c9fd3f5', 'index': 18,
'tunnelId': 12, 'logicalSwitchId': 'virtualwire-505',
'logicalSwitchName': '5d01cea4-58f8-4a16-9be0-11012cadbf55',
'enableSendRedirects': True, 'mtu': 1500,
'addressGroups': {'addressGroups': [
{'primaryAddress': '10.32.0.2', 'subnetMask': '255.255.255.0',
'subnetPrefixLength': '24'}]}, 'virtualNetworkId': 5033,
'subInterfaceBackingType': 'NETWORK'}]}, 'isConnected': True,
'index': 4, 'portgroupId': 'dvportgroup-1559',
'fenceParameters': [], 'enableProxyArp': False,
'enableSendRedirects': True},
{'label': 'vNic_5', 'name': 'vnic5',
'addressGroups': {'addressGroups': []},
'mtu': 1500, 'type': 'internal', 'isConnected': False, 'index': 5,
'fenceParameters': [], 'enableProxyArp': False,
'enableSendRedirects': True},
{'label': 'vNic_6', 'name': 'internal6',
'addressGroups': {'addressGroups': []}, 'mtu': 1500, 'type': 'trunk',
'subInterfaces': {'subInterfaces': [
{'isConnected': True, 'label': 'vNic_22',
'name': '2da534c8-3d9b-4677-aa14-2e66efd09e3f', 'index': 22,
'tunnelId': 16, 'logicalSwitchId': 'virtualwire-509',
'logicalSwitchName': '51c97abb-8ac9-4f24-b914-cc30cf8e856a',
'enableSendRedirects': True, 'mtu': 1500,
'addressGroups': {'addressGroups': [
{'primaryAddress': '10.36.0.2', 'subnetMask': '255.255.255.0',
'subnetPrefixLength': '24'}]}, 'virtualNetworkId': 5037,
'subInterfaceBackingType': 'NETWORK'},
{'isConnected': True, 'label': 'vNic_24',
'name': 'd25f00c2-eb82-455c-87b9-d2d510d42917', 'index': 24,
'tunnelId': 18, 'logicalSwitchId': 'virtualwire-511',
'logicalSwitchName': '31341032-6911-4596-8b64-afce92f46bf4',
'enableSendRedirects': True, 'mtu': 1500,
'addressGroups': {'addressGroups': [
{'primaryAddress': '10.38.0.2', 'subnetMask': '255.255.255.0',
'subnetPrefixLength': '24'}]}, 'virtualNetworkId': 5039,
'subInterfaceBackingType': 'NETWORK'}]}, 'isConnected': True,
'index': 6, 'portgroupId': 'dvportgroup-1567',
'fenceParameters': [], 'enableProxyArp': False,
'enableSendRedirects': True},
{'label': 'vNic_7', 'name': 'vnic7',
'addressGroups': {'addressGroups': []},
'mtu': 1500, 'type': 'internal', 'isConnected': False, 'index': 7,
'fenceParameters': [], 'enableProxyArp': False,
'enableSendRedirects': True},
{'label': 'vNic_8', 'name': 'internal8',
'addressGroups': {'addressGroups': []}, 'mtu': 1500, 'type': 'trunk',
'subInterfaces': {'subInterfaces': [
{'isConnected': True, 'label': 'vNic_28',
'name': 'cf4cc867-e958-4f86-acea-d8a52a4c26c8', 'index': 28,
'tunnelId': 22, 'logicalSwitchId': 'virtualwire-515',
'logicalSwitchName': '9d2f5b66-c252-4681-86af-946048414a1f',
'enableSendRedirects': True, 'mtu': 1500,
'addressGroups': {'addressGroups': [
{'primaryAddress': '10.42.0.2', 'subnetMask': '255.255.255.0',
'subnetPrefixLength': '24'}]}, 'virtualNetworkId': 5043,
'subInterfaceBackingType': 'NETWORK'},
{'isConnected': True, 'label': 'vNic_30',
'name': 'ceab3d83-3ee2-4372-b5d7-f1d47be76e9d', 'index': 30,
'tunnelId': 24, 'logicalSwitchId': 'virtualwire-517',
'logicalSwitchName': '83bce421-b72c-4744-9285-a0fcc25b001a',
'enableSendRedirects': True, 'mtu': 1500,
'addressGroups': {'addressGroups': [
{'primaryAddress': '10.44.0.2', 'subnetMask': '255.255.255.0',
'subnetPrefixLength': '24'}]}, 'virtualNetworkId': 5045,
'subInterfaceBackingType': 'NETWORK'}]}, 'isConnected': True,
'index': 8, 'portgroupId': 'dvportgroup-1575',
'fenceParameters': [], 'enableProxyArp': False,
'enableSendRedirects': True},
{'label': 'vNic_9', 'name': 'vnic9',
'addressGroups': {'addressGroups': []},
'mtu': 1500, 'type': 'internal', 'isConnected': False, 'index': 9,
'fenceParameters': [], 'enableProxyArp': False,
'enableSendRedirects': True}]}
BAD_SUBINTERFACE = {
'isConnected': True, 'label': 'vNic_31',
'name': '11111111-2222-3333-4444-555555555555', 'index': 31,
'tunnelId': 25, 'logicalSwitchId': 'virtualwire-518',
'logicalSwitchName': '55555555-4444-3333-2222-111111111111',
'enableSendRedirects': True, 'mtu': 1500, 'addressGroups': {
'addressGroups': [
{'primaryAddress': '10.99.0.2', 'subnetMask': '255.255.255.0',
'subnetPrefixLength': '24'}]}, 'virtualNetworkId': 5045,
'subInterfaceBackingType': 'NETWORK'}
BAD_INTERFACE = {
'label': 'vNic_8', 'name': 'vnic8',
'addressGroups': {'addressGroups': []},
'mtu': 1500, 'type': 'internal', 'isConnected': False, 'index': 8,
'fenceParameters': [], 'enableProxyArp': False,
'enableSendRedirects': True}
class ErrorDhcpEdgeTestCaseReadOnly(base.BaseTestCase):
def _is_readonly(self):
return True
def setUp(self):
def get_plugin_mock(alias=constants.CORE):
if alias in (constants.CORE, constants.L3):
return self.plugin
super(ErrorDhcpEdgeTestCaseReadOnly, self).setUp()
self.plugin = mock.Mock()
self.context = mock.Mock()
self.context.session = mock.Mock()
mock.patch('neutron_lib.plugins.directory.get_plugin',
side_effect=get_plugin_mock).start()
self.plugin.edge_manager = mock.Mock()
self.plugin.nsx_v = mock.Mock()
self.plugin.nsx_v.vcns = mock.Mock()
mock.patch.object(self.plugin, 'get_availability_zone_name_by_edge',
return_value='default').start()
self.log = mock.Mock()
error_dhcp_edge.LOG = self.log
self.job = error_dhcp_edge.ErrorDhcpEdgeJob(self._is_readonly())
def test_clean_run(self):
mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings',
return_value=[]).start()
self.job.run(self.context)
self.log.warning.assert_not_called()
def test_invalid_router_binding(self):
router_binds = copy.deepcopy(FAKE_ROUTER_BINDINGS)
router_binds.append(BAD_ROUTER_BINDING)
mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings',
return_value=router_binds).start()
mock.patch('vmware_nsx.db.nsxv_db.get_edge_vnic_bindings_by_edge',
return_value=FAKE_EDGE_VNIC_BINDS).start()
mock.patch.object(self.plugin, 'get_networks',
return_value=FAKE_NETWORK_RESULTS).start()
mock.patch.object(self.plugin.nsx_v.vcns, 'get_interfaces',
return_value=(None, BACKEND_EDGE_VNICS)).start()
mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_internal_networks',
return_value=FAKE_INTERNAL_NETWORKS).start()
self.job.run(self.context)
self.log.warning.assert_called_once()
def test_invalid_edge_vnic_bindings(self):
def fake_vnic_bind(*args, **kwargs):
# The DB content is manipulated by the housekeeper. Therefore
# get_edge_vnic_bindings_by_edge() output should be altered
if fake_vnic_bind.ctr < 2:
ret = fake_vnic_bind.vnic_binds
else:
ret = FAKE_EDGE_VNIC_BINDS
fake_vnic_bind.ctr += 1
return ret
fake_vnic_bind.ctr = 0
fake_vnic_bind.vnic_binds = copy.deepcopy(FAKE_EDGE_VNIC_BINDS)
fake_vnic_bind.vnic_binds.append(BAD_VNIC_BINDING)
mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings',
return_value=FAKE_ROUTER_BINDINGS).start()
mock.patch('vmware_nsx.db.nsxv_db.get_edge_vnic_bindings_by_edge',
side_effect=fake_vnic_bind).start()
mock.patch.object(self.plugin, 'get_networks',
return_value=FAKE_NETWORK_RESULTS).start()
mock.patch.object(self.plugin.nsx_v.vcns, 'get_interfaces',
return_value=(None, BACKEND_EDGE_VNICS)).start()
mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_internal_networks',
return_value=FAKE_INTERNAL_NETWORKS).start()
self.job.run(self.context)
self.log.warning.assert_called_once()
def test_invalid_edge_sub_if(self):
backend_vnics = copy.deepcopy(BACKEND_EDGE_VNICS)
backend_vnics['vnics'][8]['subInterfaces']['subInterfaces'].append(
BAD_SUBINTERFACE)
mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings',
return_value=FAKE_ROUTER_BINDINGS).start()
mock.patch('vmware_nsx.db.nsxv_db.get_edge_vnic_bindings_by_edge',
return_value=FAKE_EDGE_VNIC_BINDS).start()
mock.patch.object(self.plugin, 'get_networks',
return_value=FAKE_NETWORK_RESULTS).start()
mock.patch.object(self.plugin.nsx_v.vcns, 'get_interfaces',
return_value=(None, backend_vnics)).start()
mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_internal_networks',
return_value=FAKE_INTERNAL_NETWORKS).start()
self.job.run(self.context)
self.log.warning.assert_called_once()
def test_missing_edge_sub_if(self):
backend_vnics = copy.deepcopy(BACKEND_EDGE_VNICS)
del backend_vnics['vnics'][8]['subInterfaces']['subInterfaces'][1]
mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings',
return_value=FAKE_ROUTER_BINDINGS).start()
mock.patch('vmware_nsx.db.nsxv_db.get_edge_vnic_bindings_by_edge',
return_value=FAKE_EDGE_VNIC_BINDS).start()
mock.patch.object(self.plugin, 'get_networks',
return_value=FAKE_NETWORK_RESULTS).start()
mock.patch.object(self.plugin.nsx_v.vcns, 'get_interfaces',
return_value=(None, backend_vnics)).start()
mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_internal_networks',
return_value=FAKE_INTERNAL_NETWORKS).start()
self.job.run(self.context)
self.log.warning.assert_called_once()
def test_missing_edge_interface(self):
backend_vnics = copy.deepcopy(BACKEND_EDGE_VNICS)
backend_vnics['vnics'][8] = BAD_INTERFACE
mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings',
return_value=FAKE_ROUTER_BINDINGS).start()
mock.patch('vmware_nsx.db.nsxv_db.get_edge_vnic_bindings_by_edge',
return_value=FAKE_EDGE_VNIC_BINDS).start()
mock.patch.object(self.plugin, 'get_networks',
return_value=FAKE_NETWORK_RESULTS).start()
mock.patch.object(self.plugin.nsx_v.vcns, 'get_interfaces',
return_value=(None, backend_vnics)).start()
mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_internal_networks',
return_value=FAKE_INTERNAL_NETWORKS).start()
self.job.run(self.context)
self.assertEqual(2, self.log.warning.call_count)
class ErrorDhcpEdgeTestCaseReadWrite(ErrorDhcpEdgeTestCaseReadOnly):
def _is_readonly(self):
return False
def test_invalid_router_binding(self):
del_binding = mock.patch(
'vmware_nsx.db.nsxv_db.delete_nsxv_router_binding').start()
mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings_by_edge',
return_value=FAKE_ROUTER_BINDINGS).start()
upd_binding = mock.patch(
'vmware_nsx.db.nsxv_db.update_nsxv_router_binding').start()
super(ErrorDhcpEdgeTestCaseReadWrite, self
).test_invalid_router_binding()
del_binding.assert_called_with(mock.ANY,
BAD_ROUTER_BINDING['router_id'])
upd_binding.assert_has_calls(
[mock.call(mock.ANY, r['router_id'], status='ACTIVE')
for r in FAKE_ROUTER_BINDINGS])
def test_invalid_edge_vnic_bindings(self):
del_binding = mock.patch(
'vmware_nsx.db.nsxv_db.free_edge_vnic_by_network').start()
mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings_by_edge',
return_value=FAKE_ROUTER_BINDINGS).start()
upd_binding = mock.patch(
'vmware_nsx.db.nsxv_db.update_nsxv_router_binding').start()
super(ErrorDhcpEdgeTestCaseReadWrite, self
).test_invalid_edge_vnic_bindings()
del_binding.assert_called_with(mock.ANY, BAD_VNIC_BINDING['edge_id'],
BAD_VNIC_BINDING['network_id'])
upd_binding.assert_has_calls(
[mock.call(mock.ANY, r['router_id'], status='ACTIVE')
for r in FAKE_ROUTER_BINDINGS])
def test_invalid_edge_sub_if(self):
mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings_by_edge',
return_value=FAKE_ROUTER_BINDINGS).start()
upd_binding = mock.patch(
'vmware_nsx.db.nsxv_db.update_nsxv_router_binding').start()
upd_if = mock.patch.object(self.plugin.nsx_v.vcns,
'update_interface').start()
super(ErrorDhcpEdgeTestCaseReadWrite, self
).test_invalid_edge_sub_if()
upd_binding.assert_has_calls(
[mock.call(mock.ANY, r['router_id'], status='ACTIVE')
for r in FAKE_ROUTER_BINDINGS])
upd_if.assert_called_with('edge-752', BACKEND_EDGE_VNICS['vnics'][8])
def test_missing_edge_sub_if(self):
deleted_sub_if = BACKEND_EDGE_VNICS['vnics'][8]['subInterfaces'][
'subInterfaces'][1]
mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings_by_edge',
return_value=FAKE_ROUTER_BINDINGS).start()
mock.patch.object(
self.plugin.edge_manager, '_create_sub_interface',
return_value=('dvportgroup-1575', deleted_sub_if)).start()
upd_binding = mock.patch(
'vmware_nsx.db.nsxv_db.update_nsxv_router_binding').start()
upd_if = mock.patch.object(self.plugin.nsx_v.vcns,
'update_interface').start()
super(ErrorDhcpEdgeTestCaseReadWrite, self
).test_missing_edge_sub_if()
upd_binding.assert_has_calls(
[mock.call(mock.ANY, r['router_id'], status='ACTIVE')
for r in FAKE_ROUTER_BINDINGS])
upd_if.assert_called_with('edge-752', BACKEND_EDGE_VNICS['vnics'][8])
def test_missing_edge_interface(self):
def fake_create_subif(*args, **kwargs):
deleted_sub_if = BACKEND_EDGE_VNICS['vnics'][8]['subInterfaces'][
'subInterfaces'][fake_create_subif.ctr]
fake_create_subif.ctr += 1
return (BACKEND_EDGE_VNICS['vnics'][8]['portgroupId'],
deleted_sub_if)
fake_create_subif.ctr = 0
mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings_by_edge',
return_value=FAKE_ROUTER_BINDINGS).start()
mock.patch.object(
self.plugin.edge_manager, '_create_sub_interface',
side_effect=fake_create_subif).start()
upd_binding = mock.patch(
'vmware_nsx.db.nsxv_db.update_nsxv_router_binding').start()
upd_if = mock.patch.object(self.plugin.nsx_v.vcns,
'update_interface').start()
super(ErrorDhcpEdgeTestCaseReadWrite, self
).test_missing_edge_interface()
upd_binding.assert_has_calls(
[mock.call(mock.ANY, r['router_id'], status='ACTIVE')
for r in FAKE_ROUTER_BINDINGS])
upd_if.assert_called_with('edge-752', BACKEND_EDGE_VNICS['vnics'][8])