Add an option for always synchronizing status

This patch adds an option to always synchronize operational status on show operations: when it is enabled, the resource status is always fetched from the backend, regardless of the synchronization thread. The patch also fixes an issue observed when running test_nvp_sync on its own.

Bug #1229149
Bug #1229243

Change-Id: Ifef52ac1c48e32f413d51fc04fd926073335665d
parent f220b39d57
commit 0ce7966ab8
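For orientation, the condensed sketch below restates the show-time decision this patch introduces in get_network, get_port and get_router. It is an illustration only: the option and attribute names follow the diff, while the helper function itself is hypothetical.

# Illustration only, not part of the patch.
def _should_sync_status(nvp_sync_opts, fields):
    # Refresh operational status from the NVP backend when the new
    # always_read_status option is enabled, or when the caller explicitly
    # requested the 'status' field.
    return bool(nvp_sync_opts.always_read_status or
                (fields and 'status' in fields))


class _FakeSyncOpts(object):
    always_read_status = False

print(_should_sync_status(_FakeSyncOpts(), ['id', 'status']))  # True
print(_should_sync_status(_FakeSyncOpts(), None))              # False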
@@ -140,6 +140,12 @@
 # requests to be less than min_sync_req_delay
 # min_chunk_size = 500
 
+# Enable this option to allow punctual state synchronization on show
+# operations. In this way, show operations will always fetch the operational
+# status of the resource from the NVP backend, and this might have
+# a considerable impact on overall performance.
+# always_read_status = False
+
 [vcns]
 # URL for VCNS manager
 # manager_uri = https://management_ip
@@ -1070,12 +1070,12 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
         with context.session.begin(subtransactions=True):
             # goto to the plugin DB and fetch the network
             network = self._get_network(context, id)
-            if fields and 'status' in fields:
+            if (self.nvp_sync_opts.always_read_status or
+                fields and 'status' in fields):
                 # External networks are not backed by nvp lswitches
                 if not network.external:
                     # Perform explicit state synchronization
-                    self._synchronizer.synchronize_network(
-                        context, network)
+                    self._synchronizer.synchronize_network(context, network)
             # Don't do field selection here otherwise we won't be able
             # to add provider networks fields
             net_result = self._make_network_dict(network)
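A note on the rewritten condition: it relies on Python's and binding tighter than or, so it reads as always_read_status or (fields and 'status' in fields). A standalone check of that reading (illustrative, not part of the patch):

always_read_status = False
for fields in (None, [], ['id'], ['id', 'status']):
    # Mirrors the condition now used by get_network, get_port and get_router.
    print(fields, always_read_status or fields and 'status' in fields)
# Only the last case is truthy while always_read_status stays False.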
@@ -1356,7 +1356,8 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
 
     def get_port(self, context, id, fields=None):
         with context.session.begin(subtransactions=True):
-            if fields and 'status' in fields:
+            if (self.nvp_sync_opts.always_read_status or
+                fields and 'status' in fields):
                 # Perform explicit state synchronization
                 db_port = self._get_port(context, id)
                 self._synchronizer.synchronize_port(
@@ -1366,7 +1367,8 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
                 return super(NvpPluginV2, self).get_port(context, id, fields)
 
     def get_router(self, context, id, fields=None):
-        if fields and 'status' in fields:
+        if (self.nvp_sync_opts.always_read_status or
+            fields and 'status' in fields):
             db_router = self._get_router(context, id)
             # Perform explicit state synchronization
             self._synchronizer.synchronize_router(
@@ -70,7 +70,11 @@ sync_opts = [
                       'exceed state_sync_interval')),
     cfg.IntOpt('min_chunk_size', default=500,
                help=_('Minimum number of resources to be retrieved from NVP '
-                      'during state synchronization'))
+                      'during state synchronization')),
+    cfg.BoolOpt('always_read_status', default=False,
+                help=_('Always read operational status from backend on show '
+                       'operations. Enabling this option might slow down '
+                       'the system.'))
 ]
 
 connection_opts = [
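For reference, the sketch below shows how a BoolOpt like the one added to sync_opts is registered and overridden through oslo.config. The 'NVP_SYNC' group name is taken from the tests' set_override calls; the registration code here is a standalone illustration, not the plugin's own wiring.

from oslo.config import cfg

# Standalone illustration: use a private ConfigOpts rather than the global CONF.
conf = cfg.ConfigOpts()
group = cfg.OptGroup(name='NVP_SYNC')
conf.register_group(group)
conf.register_opts(
    [cfg.BoolOpt('always_read_status', default=False,
                 help='Always read operational status from backend on show '
                      'operations.')],
    group=group)

conf([])  # parse with no CLI arguments or config files
print(conf.NVP_SYNC.always_read_status)   # False, the default
conf.set_override('always_read_status', True, 'NVP_SYNC')
print(conf.NVP_SYNC.always_read_status)   # True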
@@ -287,16 +287,22 @@ class NvpSyncTestCase(base.BaseTestCase):
         args = ['--config-file', get_fake_conf('neutron.conf.test'),
                 '--config-file', get_fake_conf('nvp.ini.test')]
         config.parse(args=args)
+        cfg.CONF.set_override('allow_overlapping_ips', True)
         self._plugin = NeutronPlugin.NvpPluginV2()
+        # Mock neutron manager plugin load functions to speed up tests
         mock_nm_get_plugin = mock.patch('neutron.manager.NeutronManager.'
                                         'get_plugin')
+        mock_nm_get_service_plugins = mock.patch(
+            'neutron.manager.NeutronManager.get_service_plugins')
         self.mock_nm_get_plugin = mock_nm_get_plugin.start()
         self.mock_nm_get_plugin.return_value = self._plugin
+        mock_nm_get_service_plugins.start()
         super(NvpSyncTestCase, self).setUp()
         self.addCleanup(self.fc.reset_all)
         self.addCleanup(patch_sync.stop)
         self.addCleanup(mock_nvpapi.stop)
         self.addCleanup(mock_nm_get_plugin.stop)
+        self.addCleanup(mock_nm_get_service_plugins.stop)
 
     def tearDown(self):
         cfg.CONF.reset()
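The new setUp lines follow the usual mock.patch start / addCleanup(stop) pattern. A self-contained sketch of that pattern, patching a stand-in target instead of neutron.manager.NeutronManager.get_service_plugins:

import os
import unittest

import mock


class PatchCleanupExample(unittest.TestCase):
    def setUp(self):
        patcher = mock.patch('os.getcwd', return_value='/fake/cwd')
        self.mock_getcwd = patcher.start()
        # Guarantees the patch is undone after every test, whether the module
        # runs on its own or as part of the full suite.
        self.addCleanup(patcher.stop)
        super(PatchCleanupExample, self).setUp()

    def test_patched(self):
        self.assertEqual('/fake/cwd', os.getcwd())


if __name__ == '__main__':
    unittest.main()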
@@ -547,10 +553,20 @@ class NvpSyncTestCase(base.BaseTestCase):
         exp_status = constants.NET_STATUS_ACTIVE
         self.assertEqual(exp_status, q_net['status'])
 
+    def test_synchronize_network_on_get(self):
+        cfg.CONF.set_override('always_read_status', True, 'NVP_SYNC')
+        ctx = context.get_admin_context()
+        with self._populate_data(ctx):
+            # Put a network down to verify punctual synchronization
+            q_net_id = ls_uuid = self.fc._fake_lswitch_dict.keys()[0]
+            self.fc._fake_lswitch_dict[ls_uuid]['status'] = 'false'
+            q_net_data = self._plugin.get_network(ctx, q_net_id)
+            self.assertEqual(constants.NET_STATUS_DOWN, q_net_data['status'])
+
     def test_synchronize_port(self):
         ctx = context.get_admin_context()
         with self._populate_data(ctx):
-            # Put a network down to verify synchronization
+            # Put a port down to verify synchronization
             lp_uuid = self.fc._fake_lswitch_lport_dict.keys()[0]
             lport = self.fc._fake_lswitch_lport_dict[lp_uuid]
             q_port_id = self._get_tag_dict(lport['tags'])['q_port_id']
@@ -566,10 +582,23 @@ class NvpSyncTestCase(base.BaseTestCase):
         exp_status = constants.PORT_STATUS_ACTIVE
         self.assertEqual(exp_status, q_port['status'])
 
+    def test_synchronize_port_on_get(self):
+        cfg.CONF.set_override('always_read_status', True, 'NVP_SYNC')
+        ctx = context.get_admin_context()
+        with self._populate_data(ctx):
+            # Put a port down to verify punctual synchronization
+            lp_uuid = self.fc._fake_lswitch_lport_dict.keys()[0]
+            lport = self.fc._fake_lswitch_lport_dict[lp_uuid]
+            q_port_id = self._get_tag_dict(lport['tags'])['q_port_id']
+            lport['status'] = 'false'
+            q_port_data = self._plugin.get_port(ctx, q_port_id)
+            self.assertEqual(constants.PORT_STATUS_DOWN,
+                             q_port_data['status'])
+
     def test_synchronize_router(self):
         ctx = context.get_admin_context()
         with self._populate_data(ctx):
-            # Put a network down to verify synchronization
+            # Put a router down to verify synchronization
             q_rtr_id = lr_uuid = self.fc._fake_lrouter_dict.keys()[0]
             self.fc._fake_lrouter_dict[lr_uuid]['status'] = 'false'
             q_rtr_data = self._plugin._get_router(ctx, q_rtr_id)
@@ -583,6 +612,16 @@ class NvpSyncTestCase(base.BaseTestCase):
         exp_status = constants.NET_STATUS_ACTIVE
         self.assertEqual(exp_status, q_rtr['status'])
 
+    def test_synchronize_router_on_get(self):
+        cfg.CONF.set_override('always_read_status', True, 'NVP_SYNC')
+        ctx = context.get_admin_context()
+        with self._populate_data(ctx):
+            # Put a router down to verify punctual synchronization
+            q_rtr_id = lr_uuid = self.fc._fake_lrouter_dict.keys()[0]
+            self.fc._fake_lrouter_dict[lr_uuid]['status'] = 'false'
+            q_rtr_data = self._plugin.get_router(ctx, q_rtr_id)
+            self.assertEqual(constants.NET_STATUS_DOWN, q_rtr_data['status'])
+
     def test_sync_nvp_failure_backoff(self):
         self.mock_nvpapi.return_value.request.side_effect = (
             NvpApiClient.RequestTimeout)