NSXP: Update Octavia with object statuses
Send status updates for the Octavia objects: loadbalancers, listeners, pools, and members.

Change-Id: Ifd893818c2ddb1325f3bed9f618b72754ed0689f
parent d366383af6
commit 2d5b7422ed
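For context, a minimal sketch of the status payload shape this change introduces, as built by the new status_getter and forwarded to the Octavia driver endpoint. The literal key strings and IDs below are illustrative assumptions; the real code uses the lb_const.LOADBALANCERS/LISTENERS/POOLS/MEMBERS constants and IDs taken from NSX tags and paths.

# Illustrative payload only; real keys come from lb_const, real IDs
# from NSX tags/paths.
status = {
    'loadbalancers': [{'id': 'lb-uuid', 'operating_status': 'ONLINE'}],
    'listeners': [{'id': 'listener-uuid', 'operating_status': 'ONLINE'}],
    'pools': [{'id': 'pool-uuid', 'operating_status': 'ONLINE'}],
    # NSX reports members by IP; the Octavia-side endpoint resolves the
    # member id from (pool_id, member_ip) before updating the DB.
    'members': [{'pool_id': 'pool-uuid',
                 'member_ip': '10.0.0.5',
                 'operating_status': 'ONLINE'}],
}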
@@ -528,6 +528,9 @@ class NsxPolicyPlugin(nsx_plugin_common.NsxPluginV3Base):
     def _get_octavia_stats_getter(self):
         return listener_mgr.stats_getter
 
+    def _get_octavia_status_getter(self):
+        return loadbalancer_mgr.status_getter
+
     def _init_lb_profiles(self):
         ssl_profile_client = self.nsxpolicy.load_balancer.client_ssl_profile
         with locking.LockManager.get_lock('nsxp_lb_profiles_init'):
@@ -554,7 +557,8 @@ class NsxPolicyPlugin(nsx_plugin_common.NsxPluginV3Base):
         self.octavia_stats_collector = (
             octavia_listener.NSXOctaviaStatisticsCollector(
                 self,
-                self._get_octavia_stats_getter()))
+                self._get_octavia_stats_getter(),
+                self._get_octavia_status_getter()))
 
     def _init_octavia(self):
         octavia_objects = self._get_octavia_objects()
@@ -132,5 +132,8 @@ OFFLINE = 'OFFLINE'
 DEGRADED = 'DEGRADED'
 ENABLED = 'ENABLED'
 DISABLED = 'DISABLED'
+ACTIVE = 'ACTIVE'
+ERROR = 'ERROR'
+UNKNOWN = 'UNKNOWN'
 
 VMWARE_LB_VIP_OWNER = 'vmware-lb-vip'
@@ -276,20 +276,11 @@ class EdgeLoadBalancerManagerFromDict(base_mgr.NsxpLoadbalancerBaseManager):
             return {}
 
         # get the loadbalancer status from the LB service
-        lb_status = lb_const.ONLINE
         lb_status_results = service_status.get('results')
+        lb_status = lb_const.ONLINE
         if lb_status_results:
             result = lb_status_results[0]
-            if result.get('service_status'):
-                # Use backend service_status
-                lb_status = self._nsx_status_to_lb_status(
-                    result['service_status'])
-            elif result.get('alarm'):
-                # No status, but has alarms -> ERROR
-                lb_status = lb_const.OFFLINE
-            else:
-                # Unknown - assume it is ok
-                lb_status = lb_const.ONLINE
+            lb_status = _get_octavia_lb_status(result)
 
         statuses = {lb_const.LOADBALANCERS: [{'id': id, 'status': lb_status}],
                     lb_const.LISTENERS: [],
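For reference, a sketch of the NSX LB service status reply that this reader (and status_getter in the next hunk) consumes. The field names match what the code reads; the values and policy paths are invented for illustration.

# Hypothetical get_status() reply, trimmed to the fields read by the
# code; values and paths are made up.
service_status = {
    'results': [{
        'service_status': 'UP',
        'virtual_servers': [
            {'virtual_server_path': '/infra/lb-virtual-servers/vs-1',
             'status': 'UP'}],
        'pools': [
            {'pool_path': '/infra/lb-pools/pool-1',
             'status': 'UP',
             'members': [{'ip_address': '10.0.0.5', 'status': 'UP'}]}],
    }],
}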
@@ -300,21 +291,92 @@ class EdgeLoadBalancerManagerFromDict(base_mgr.NsxpLoadbalancerBaseManager):
         # to add the listeners statuses from the virtual servers statuses
         return statuses
 
-    def _nsx_status_to_lb_status(self, nsx_status):
-        if not nsx_status:
-            # default fallback
-            return lb_const.ONLINE
-
-        # Statuses that are considered ONLINE:
-        if nsx_status.upper() in ['UP', 'UNKNOWN', 'PARTIALLY_UP',
-                                  'NO_STANDBY']:
-            return lb_const.ONLINE
-        # Statuses that are considered OFFLINE:
-        if nsx_status.upper() in ['PRIMARY_DOWN', 'DETACHED', 'DOWN', 'ERROR']:
-            return lb_const.OFFLINE
-        if nsx_status.upper() == 'DISABLED':
-            return lb_const.DISABLED
+
+def _nsx_status_to_lb_status(nsx_status):
+    if not nsx_status:
+        # default fallback
+        LOG.debug("NSX LB status %s - interpreted as ONLINE", nsx_status)
+        return lb_const.ONLINE
+
+    # Statuses that are considered ONLINE:
+    if nsx_status.upper() in ['UP', 'UNKNOWN', 'PARTIALLY_UP',
+                              'NO_STANDBY']:
+        return lb_const.ONLINE
+    # Statuses that are considered OFFLINE:
+    if nsx_status.upper() in ['PRIMARY_DOWN', 'DETACHED', 'DOWN', 'ERROR']:
+        return lb_const.OFFLINE
+    if nsx_status.upper() == 'DISABLED':
+        return lb_const.DISABLED
+
+    # default fallback
+    LOG.debug("NSX LB status %s - interpreted as ONLINE", nsx_status)
+    return lb_const.ONLINE
+
+
+def _get_octavia_lb_status(result):
+    if result.get('service_status'):
+        # Use backend service_status
+        lb_status = _nsx_status_to_lb_status(
+            result['service_status'])
+    elif result.get('alarm'):
+        # No status, but has alarms -> ERROR
+        lb_status = lb_const.OFFLINE
+    else:
+        # Unknown - assume it is ok
+        lb_status = lb_const.ONLINE
+    return lb_status
+
+
+def status_getter(context, core_plugin):
+    nsxlib_lb = core_plugin.nsxpolicy.load_balancer
+    lb_client = nsxlib_lb.lb_service
+    lbs = lb_client.list()
+    lb_statuses = []
+    lsn_statuses = []
+    pool_statuses = []
+    member_statuses = []
+    for lb in lbs:
+        try:
+            service_status = lb_client.get_status(lb['id'])
+            if not isinstance(service_status, dict):
+                service_status = {}
+        except nsxlib_exc.ManagerError:
+            LOG.warning("LB service %(lbs)s is not found",
+                        {'lbs': lb['id']})
+            service_status = {}
+        lb_status_results = service_status.get('results')
+        if lb_status_results:
+            result = lb_status_results[0]
+            lb_operating_status = _get_octavia_lb_status(result)
+            for vs_status in result.get('virtual_servers', []):
+                vs_id = lib_p_utils.path_to_id(
+                    vs_status['virtual_server_path'])
+                lsn_statuses.append({
+                    'id': vs_id,
+                    'operating_status': _nsx_status_to_lb_status(
+                        vs_status['status'])})
+            for pool_status in result.get('pools', []):
+                pool_id = lib_p_utils.path_to_id(pool_status['pool_path'])
+                pool_statuses.append({
+                    'id': pool_id,
+                    'operating_status': _nsx_status_to_lb_status(
+                        pool_status['status'])})
+                for member in pool_status.get('members', []):
+                    member_statuses.append({
+                        'pool_id': pool_id,
+                        'member_ip': member.get('ip_address'),
+                        'operating_status': _nsx_status_to_lb_status(
+                            member['status'])})
+        else:
+            lb_operating_status = lb_const.OFFLINE
+
+        for tag in lb['tags']:
+            if tag['scope'] == 'loadbalancer_id':
+                lb_statuses.append(
+                    {'id': tag['tag'],
+                     'operating_status': lb_operating_status})
+
+    return {lb_const.LOADBALANCERS: lb_statuses,
+            lb_const.LISTENERS: lsn_statuses,
+            lb_const.POOLS: pool_statuses,
+            lb_const.MEMBERS: member_statuses}
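Note that the new module-level _nsx_status_to_lb_status adds an explicit default fallback; the removed method could fall through and return None for an unrecognized status. The mapping can be summarized with a standalone restatement (lb_const values inlined as plain strings, for illustration only):

# Standalone restatement of _nsx_status_to_lb_status, for illustration.
def nsx_to_lb_status(nsx_status):
    if not nsx_status:
        return 'ONLINE'  # default fallback
    status = nsx_status.upper()
    if status in ('UP', 'UNKNOWN', 'PARTIALLY_UP', 'NO_STANDBY'):
        return 'ONLINE'
    if status in ('PRIMARY_DOWN', 'DETACHED', 'DOWN', 'ERROR'):
        return 'OFFLINE'
    if status == 'DISABLED':
        return 'DISABLED'
    return 'ONLINE'  # default fallback for unrecognized statuses

assert nsx_to_lb_status('partially_up') == 'ONLINE'
assert nsx_to_lb_status('PRIMARY_DOWN') == 'OFFLINE'
assert nsx_to_lb_status(None) == 'ONLINE'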
@@ -545,10 +545,26 @@ class NSXOctaviaDriver(driver_base.ProviderDriver):
 class NSXOctaviaDriverEndpoint(driver_lib.DriverLibrary):
     target = messaging.Target(namespace="control", version='1.0')
 
+    def __init__(self, status_socket=driver_lib.DEFAULT_STATUS_SOCKET,
+                 stats_socket=driver_lib.DEFAULT_STATS_SOCKET, **kwargs):
+        super(NSXOctaviaDriverEndpoint, self).__init__(
+            status_socket, stats_socket, **kwargs)
+        self.repositories = repositories.Repositories()
+
     @log_helpers.log_method_call
     def update_loadbalancer_status(self, ctxt, status):
         # refresh the driver lib session
         self.db_session = db_apis.get_session()
+        for member in status.get('members', []):
+            if member.get('id'):
+                pass
+            elif member.get('member_ip') and member.get('pool_id'):
+                db_member = self.repositories.member.get(
+                    self.db_session,
+                    pool_id=member['pool_id'],
+                    ip_address=member['member_ip'])
+                if db_member:
+                    member['id'] = db_member.id
         try:
             return super(NSXOctaviaDriverEndpoint,
                          self).update_loadbalancer_status(status)
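The member loop above exists because NSX identifies members by their IP within a pool, while the Octavia DB keys them by id; entries arriving without an id are backfilled from the member repository. A hypothetical before/after of one entry (IDs invented):

# Hypothetical member status entry as received from the NSX side:
member = {'pool_id': 'pool-uuid', 'member_ip': '10.0.0.5',
          'operating_status': 'ONLINE'}
# After a successful repository lookup the endpoint adds the Octavia id:
member['id'] = 'member-uuid'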
@@ -300,6 +300,10 @@ class NSXOctaviaListenerEndpoint(object):
         kw = {'statistics': statistics}
         self.client.cast({}, 'update_listener_statistics', **kw)
 
+    def update_loadbalancer_status(self, status):
+        kw = {'status': status}
+        self.client.cast({}, 'update_loadbalancer_status', **kw)
+
     @log_helpers.log_method_call
     def loadbalancer_create(self, ctxt, loadbalancer):
         ctx = neutron_context.Context(None, loadbalancer['project_id'])
@@ -625,9 +629,11 @@ class NSXOctaviaListenerEndpoint(object):
 
 
 class NSXOctaviaStatisticsCollector(object):
-    def __init__(self, core_plugin, listener_stats_getter):
+    def __init__(self, core_plugin, listener_stats_getter,
+                 loadbalancer_status_getter=None):
         self.core_plugin = core_plugin
         self.listener_stats_getter = listener_stats_getter
+        self.loadbalancer_status_getter = loadbalancer_status_getter
        if cfg.CONF.octavia_stats_interval:
             eventlet.spawn_n(self.thread_runner,
                              cfg.CONF.octavia_stats_interval)
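Because loadbalancer_status_getter defaults to None, existing callers keep working unchanged; any callable with the (context, core_plugin) signature can be plugged in. A hypothetical test wiring (stub names are not from the patch, and the key strings stand in for the lb_const values):

# Hypothetical stubs for a unit test; assumes NSXOctaviaStatisticsCollector
# is imported from octavia_listener, core_plugin is a plugin instance, and
# cfg.CONF.octavia_stats_interval is unset so no polling thread starts.
def stub_stats_getter(context, core_plugin):
    return []

def stub_status_getter(context, core_plugin):
    return {'loadbalancers': [], 'listeners': [],
            'pools': [], 'members': []}

collector = NSXOctaviaStatisticsCollector(
    core_plugin, stub_stats_getter,
    loadbalancer_status_getter=stub_status_getter)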
@@ -646,8 +652,12 @@ class NSXOctaviaStatisticsCollector(object):
 
         listeners_stats = self.listener_stats_getter(
             context, self.core_plugin)
-        if not listeners_stats:
+        if listeners_stats:
             # Avoid sending empty stats
-            return
-        stats = {'listeners': listeners_stats}
-        endpoint.update_listener_statistics(stats)
+            stats = {'listeners': listeners_stats}
+            endpoint.update_listener_statistics(stats)
+
+        if self.loadbalancer_status_getter:
+            loadbalancer_status = self.loadbalancer_status_getter(
+                context, self.core_plugin)
+            endpoint.update_loadbalancer_status(loadbalancer_status)
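Put together, each collection cycle now pushes listener statistics and, when a status getter is wired in, object statuses. A rough, simplified shape of the periodic loop (the real collector drives this from thread_runner via eventlet.spawn_n at cfg.CONF.octavia_stats_interval; the function below is an illustrative stand-in, not the patch's code):

import time

def run_collector(interval, collect):
    # Simplified stand-in for thread_runner: poll forever at a fixed
    # interval, invoking one stats/status collection pass per tick.
    while True:
        time.sleep(interval)
        collect()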