[PTP] Set context for multiple ptp instances and correct master status

Fixed two issues:
1. The default context used to initialize each ptp service instance
was incorrectly being copied by reference.

On startup, the initial context for each ptp service instance was set
to the default values via a copy by reference, so an update intended
for one instance propagated to every instance. The initialization now
copies by value instead. This fixes an issue where subscription
notifications reported the wrong status, i.e. if one ptp4l instance
went Freerun, all ptp4l instances were updated to Freerun. With the
fix, only the affected instance is updated (see the sketch below).

2. Nodes running as master now transition out of the Locked state
when their ports are FAULTY.

The original logic did not consider the port state of master nodes.
A check was added to change the ptp4l lock status when ports enter a
FAULTY state.
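
A minimal sketch of the copy-by-reference bug behind issue 1
(hypothetical names, not the app's actual data structures):

    DEFAULT_CONTEXT = {'sync_state': 'Freerun', 'last_event_time': 0}

    # Broken: every instance key refers to the same dict object.
    contexts = {}
    for instance in ('ptp4l-a', 'ptp4l-b'):
        contexts[instance] = DEFAULT_CONTEXT  # copy by reference

    contexts['ptp4l-a']['sync_state'] = 'Locked'
    assert contexts['ptp4l-b']['sync_state'] == 'Locked'  # bug: both changed

    # Fixed: give each instance its own copy of the defaults.
    contexts = {i: DEFAULT_CONTEXT.copy() for i in ('ptp4l-a', 'ptp4l-b')}
    contexts['ptp4l-a']['sync_state'] = 'Locked'
    assert contexts['ptp4l-b']['sync_state'] == 'Freerun'  # independent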

Test Plan:
PASS: Build and deploy ptp-notification app
PASS: Multiple ptp instance contexts are updated independently and
notifications now report the correct state.
PASS: Master nodes transition their ptp4l status when ports go faulty.

Story: 2010056
Task: 46417

Signed-off-by: Cole Walker <cole.walker@windriver.com>
Change-Id: I3d2bfa62efeefc5aca282d80eccc5d825d7ef900
Cole Walker 2022-09-22 17:19:05 -04:00
parent 87d19911a4
commit f63dd6dc17
4 changed files with 11 additions and 14 deletions

File 1 of 4 (PTP state constants; filename not captured)

@@ -21,6 +21,7 @@ GRANDMASTER_IDENTITY = "grandmasterIdentity"
 CLOCK_CLASS = "clockClass"
 # expected values for valid ptp state
 SLAVE_MODE = "slave"
+MASTER_MODE = "master"
 TIME_IS_TRACEABLE1 = "1"
 TIME_IS_TRACEABLE2 = "true"
 GM_IS_PRESENT = "true"

File 2 of 4 (GnssMonitor; filename not captured)

@@ -68,7 +68,6 @@ class GnssMonitor(Observer):
     def update(self, subject, matched_line) -> None:
         LOG.info("Kernel event detected. %s" % matched_line)
-        LOG.debug("GnssMonitor handler logic would run now")
         self.set_gnss_status()

     def set_gnss_status(self):

File 3 of 4 (check_results; filename not captured)

@@ -78,7 +78,9 @@ def check_results(result, total_ptp_keywords, port_count):
         local_gm = True
         LOG.debug("Local node is a GM")
     for port in range(1, port_count + 1):
-        if result[constants.PORT.format(port)].lower() == constants.SLAVE_MODE or local_gm:
+        if result[constants.PORT.format(port)].lower() == constants.SLAVE_MODE:
             break
+        elif local_gm and result[constants.PORT.format(port)].lower() == constants.MASTER_MODE:
+            break
     else:
         sync_state = constants.FREERUN_PHC_STATE
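
The fix relies on Python's for/else: the else clause runs only when the
loop finishes without hitting break. A port in SLAVE_MODE breaks out, and
on a grandmaster a port in MASTER_MODE now also breaks out; a FAULTY port
matches neither branch, so a node whose ports are all FAULTY falls through
to FREERUN. A standalone sketch of that control flow (simplified inputs,
not the actual check_results signature):

    def port_sync_state(port_states, local_gm):
        for state in port_states:
            if state == 'slave':
                break          # tracking an upstream master
            elif local_gm and state == 'master':
                break          # actively serving time as grandmaster
        else:
            return 'freerun'   # no break taken: e.g. every port is FAULTY
        return 'locked'

    print(port_sync_state(['master'], local_gm=True))  # locked
    print(port_sync_state(['faulty'], local_gm=True))  # freerun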

File 4 of 4 (PtpWatcherDefault daemon; filename not captured)

@@ -259,8 +259,7 @@ class PtpWatcherDefault:
         # PTP Context
         self.ptptracker_context = {}
         for config in self.daemon_context['PTP4L_INSTANCES']:
-            self.ptptracker_context[config] = self.daemon_context.get(
-                'ptptracker_context', PtpWatcherDefault.DEFAULT_PTPTRACKER_CONTEXT)
+            self.ptptracker_context[config] = PtpWatcherDefault.DEFAULT_PTPTRACKER_CONTEXT.copy()
             self.ptptracker_context[config]['sync_state'] = PtpState.Freerun
             self.ptptracker_context[config]['last_event_time'] = self.init_time
             self.ptp_device_simulated = "true" == self.ptptracker_context[config].get(
@@ -272,8 +271,7 @@ class PtpWatcherDefault:
         # GNSS Context
         self.gnsstracker_context = {}
         for config in self.daemon_context['GNSS_INSTANCES']:
-            self.gnsstracker_context[config] = self.daemon_context.get(
-                'gnsstracker_context', PtpWatcherDefault.DEFAULT_GNSSTRACKER_CONTEXT)
+            self.gnsstracker_context[config] = PtpWatcherDefault.DEFAULT_GNSSTRACKER_CONTEXT.copy()
             self.gnsstracker_context[config]['sync_state'] = GnssState.Freerun
             self.gnsstracker_context[config]['last_event_time'] = self.init_time
         self.gnsstracker_context_lock = threading.Lock()
@@ -281,16 +279,14 @@ class PtpWatcherDefault:
         # OS Clock Context
         self.osclocktracker_context = {}
-        self.osclocktracker_context = self.daemon_context.get(
-            'os_clock_tracker_context', PtpWatcherDefault.DEFAULT_OS_CLOCK_TRACKER_CONTEXT)
+        self.osclocktracker_context = PtpWatcherDefault.DEFAULT_OS_CLOCK_TRACKER_CONTEXT.copy()
         self.osclocktracker_context['sync_state'] = OsClockState.Freerun
         self.osclocktracker_context['last_event_time'] = self.init_time
         self.osclocktracker_context_lock = threading.Lock()

         # Overall Sync Context
         self.overalltracker_context = {}
-        self.overalltracker_context = self.daemon_context.get(
-            'overall_sync_tracker_context', PtpWatcherDefault.DEFAULT_OVERALL_SYNC_TRACKER_CONTEXT)
+        self.overalltracker_context = PtpWatcherDefault.DEFAULT_OVERALL_SYNC_TRACKER_CONTEXT.copy()
         self.overalltracker_context['sync_state'] = OverallClockState.Freerun
         self.overalltracker_context['last_event_time'] = self.init_time
         self.overalltracker_context_lock = threading.Lock()
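
Note that dict.copy() is a shallow copy. That is sufficient here on the
assumption that the DEFAULT_* context templates hold only flat scalar
values; if a template ever gained a nested dict, a deep copy would be
needed instead, as this sketch shows:

    import copy

    default = {'sync_state': 'Freerun', 'nested': {'holdover': False}}

    a = default.copy()
    a['nested']['holdover'] = True        # shallow copy shares 'nested'
    print(default['nested']['holdover'])  # True: the template was mutated

    b = copy.deepcopy(default)
    b['nested']['holdover'] = False
    print(default['nested']['holdover'])  # still True: deep copy is isolated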
@@ -624,12 +620,12 @@ class PtpWatcherDefault:
             new_event, sync_state, new_event_time = self.__get_ptp_status(
                 holdover_time, freq, sync_state, last_event_time, ptp_monitor)
             LOG.info("%s PTP sync state: state is %s, new_event is %s" % (
-                 ptp_monitor.ptp4l_service_name, sync_state, new_event))
+                ptp_monitor.ptp4l_service_name, sync_state, new_event))
             new_clock_class_event, clock_class, clock_class_event_time = \
                 ptp_monitor.get_ptp_clock_class()
             LOG.info("%s PTP clock class: clockClass is %s, new_event is %s" % (
-                 ptp_monitor.ptp4l_service_name, clock_class, new_clock_class_event))
+                ptp_monitor.ptp4l_service_name, clock_class, new_clock_class_event))
             if new_event or forced:
                 # update context
                 self.ptptracker_context_lock.acquire()
@@ -637,7 +633,6 @@ class PtpWatcherDefault:
                 self.ptptracker_context[ptp_monitor.ptp4l_service_name][
                     'last_event_time'] = new_event_time

                 # publish new event
                 LOG.debug("Publish ptp status to clients")
                 lastStatus = {
@@ -651,7 +646,7 @@ class PtpWatcherDefault:
                     'EventTimestamp': new_event_time
                 }
                 self.ptpeventproducer.publish_status(lastStatus, 'PTP')
+                lastStatus = {}
                 # publish new event in API version v2 format
                 resource_address = utils.format_resource_address(
                     self.node_name, constants.SOURCE_SYNC_PTP_LOCK_STATE)