Add Notification Services and Notification Client Sidecar

Story: 2008529
Task: 41688

Signed-off-by: Bin Yang <bin.yang@windriver.com>
Change-Id: Ib276520605cc624a9976f804a1721ba2c5909403
This commit is contained in:
Bin Yang 2021-01-26 08:51:16 +08:00
parent c525a7fe47
commit 545e6b6bb0
113 changed files with 3882 additions and 0 deletions

View File

@ -0,0 +1,3 @@
notificationservice-base
locationservice-base
notificationclient-base

View File

@ -0,0 +1,22 @@
ARG BASE
FROM ${BASE}
ARG STX_REPO_FILE=/etc/yum.repos.d/stx.repo
ENV KUBE_LATEST_VERSION="v1.18.3"
RUN set -ex ;\
yum install --disablerepo=* \
$(grep '^name=' ${STX_REPO_FILE} | awk -F '=' '{printf "--enablerepo=" $2 " "}') \
-y \
gcc python3-devel python3-pip \
&& pip3 install --user pecan \
&& pip3 install oslo-config \
&& pip3 install oslo-messaging \
&& pip3 install WSME
WORKDIR /opt/
COPY ./locationservice /opt/locationservice
RUN cd /opt/locationservice && python3 setup.py develop
CMD ["bash"]

View File

@ -0,0 +1,15 @@
from pecan import make_app
from apiserver.repository.notification_control import notification_control
from pecan import conf
def setup_app(config):
    """Build the Pecan WSGI application for the location service.

    Kicks the notification daemon (refresh) before assembling the app from
    the parsed configuration.
    """
    notification_control.refresh()
    settings = dict(config.app)
    root_controller = settings.pop('root')
    return make_app(
        root_controller,
        logging=getattr(config, 'logging', {}),
        **settings
    )

View File

@ -0,0 +1,20 @@
#coding=utf-8
from pecan import expose, redirect, rest, route, response
from webob.exc import status_map
from wsme import types as wtypes
from wsmeext.pecan import wsexpose
class HealthController(rest.RestController):
    """REST controller exposing a trivial liveness probe at /health."""

    @wsexpose(wtypes.text)
    def get(self):
        # NOTE(review): the declared return type is wtypes.text but a dict is
        # returned; WSME will serialize it -- confirm the intended payload.
        return {'health': True}


class RootController(object):
    """Empty application root; routes are attached explicitly below."""
    pass


# Mount the health probe under /health on the application root.
route(RootController, 'health', HealthController())

View File

@ -0,0 +1,17 @@
from pecan import conf # noqa
def init_model():
    """Stub invoked once at application startup.

    Bind a parsed database configuration, set up tables or ORM classes, or
    perform any other database initialization here.  For recipes see
    https://pecan.readthedocs.io/en/latest/databases.html
    """

View File

@ -0,0 +1,26 @@
import os
import time
import json
from pecan import conf
from locationservicesdk.services.daemon import DaemonControl
# Registration broker (RabbitMQ) credentials/endpoint, overridable via env.
REGISTRATION_USER = os.environ.get("REGISTRATION_USER", "admin")
REGISTRATION_PASS = os.environ.get("REGISTRATION_PASS", "admin")
REGISTRATION_PORT = os.environ.get("REGISTRATION_PORT", "5672")
REGISTRATION_HOST = os.environ.get("REGISTRATION_HOST",'registration.notification.svc.cluster.local')

# Identity of the node and pod this service is running on.
THIS_NODE_NAME = os.environ.get("THIS_NODE_NAME",'controller-0')
THIS_POD_IP = os.environ.get("THIS_POD_IP",'127.0.0.1')

REGISTRATION_TRANSPORT_ENDPOINT = 'rabbit://{0}:{1}@{2}:{3}'.format(
    REGISTRATION_USER, REGISTRATION_PASS, REGISTRATION_HOST, REGISTRATION_PORT)

# The location service needs no SQLAlchemy bindings; pass an empty config.
sqlalchemy_conf_json=json.dumps({})

# Static location-announcement payload describing this node.
LocationInfo = {
    'NodeName': THIS_NODE_NAME,
    'PodIP': THIS_POD_IP,
    'ResourceTypes': ['PTP'],
    'Timestamp': time.time()
}
location_info_json = json.dumps(LocationInfo)

# Module-level singleton: constructing DaemonControl spawns the location
# watcher child process as a side effect of importing this module.
notification_control = DaemonControl(
    sqlalchemy_conf_json, REGISTRATION_TRANSPORT_ENDPOINT, location_info_json)

View File

@ -0,0 +1,22 @@
<html>
<head>
<title>${self.title()}</title>
${self.style()}
${self.javascript()}
</head>
<body>
${self.body()}
</body>
</html>
<%def name="title()">
Default Title
</%def>
<%def name="style()">
<link rel="stylesheet" type="text/css" media="screen" href="/css/style.css" />
</%def>
<%def name="javascript()">
<script type="text/javascript" src="/javascript/shared.js"></script>
</%def>

View File

@ -0,0 +1,22 @@
import os
from unittest import TestCase
from pecan import set_config
from pecan.testing import load_test_app
__all__ = ['FunctionalTest']
class FunctionalTest(TestCase):
    """
    Used for functional tests where you need to test your
    literal application and its integration with the framework.
    """

    def setUp(self):
        # Load the WSGI app using the test config.py next to this file.
        self.app = load_test_app(os.path.join(
            os.path.dirname(__file__),
            'config.py'
        ))

    def tearDown(self):
        # Reset pecan's global configuration so tests stay isolated.
        set_config({}, overwrite=True)

View File

@ -0,0 +1,25 @@
# Server Specific Configurations
server = {
    'port': '8080',
    'host': '0.0.0.0'
}

# Pecan Application Configurations
app = {
    'root': 'notificationclient.controllers.root.RootController',
    'modules': ['notificationclient'],
    'static_root': '%(confdir)s/../../public',
    'template_path': '%(confdir)s/../templates',
    'debug': True,
    # NOTE(review): the error route key is a string here ('404') while the
    # sibling configs in this commit use the int key 404 -- confirm which
    # form pecan expects and make them consistent.
    'errors': {
        '404': '/error/404',
        '__force_dict__': True
    }
}

# Custom Configurations must be in Python dictionary format::
#
# foo = {'bar':'baz'}
#
# All configurations are accessible at::
# pecan.conf

View File

@ -0,0 +1,22 @@
from unittest import TestCase
from webtest import TestApp
from notificationclient.tests import FunctionalTest
class TestRootController(FunctionalTest):
    """Functional tests for the application root.

    NOTE(review): these are the stock pecan-scaffold sample tests; the
    RootController in this package defines no index or search handler, so
    test_get and test_search presumably fail -- verify before relying on
    them in CI.
    """

    def test_get(self):
        response = self.app.get('/')
        assert response.status_int == 200

    def test_search(self):
        # The scaffold's sample root redirects searches to the pecan docs.
        response = self.app.post('/', params={'q': 'RestController'})
        assert response.status_int == 302
        assert response.headers['Location'] == (
            'https://pecan.readthedocs.io/en/latest/search.html'
            '?q=RestController'
        )

    def test_get_not_found(self):
        response = self.app.get('/a/bogus/url', expect_errors=True)
        assert response.status_int == 404

View File

@ -0,0 +1,7 @@
from unittest import TestCase
class TestUnits(TestCase):
    """Placeholder unit-test suite from the pecan scaffold."""

    def test_units(self):
        # Trivial arithmetic sanity check.
        self.assertEqual(5 * 5, 25)

View File

@ -0,0 +1,54 @@
# Server Specific Configurations
server = {
'port': '8080',
'host': '0.0.0.0'
}
# Pecan Application Configurations
app = {
'root': 'apiserver.controllers.root.RootController',
'modules': ['apiserver'],
'static_root': '%(confdir)s/public',
'template_path': '%(confdir)s/apiserver/templates',
'debug': True,
'errors': {
404: '/error/404',
'__force_dict__': True
}
}
logging = {
'root': {'level': 'INFO', 'handlers': ['console']},
'loggers': {
'apiserver': {'level': 'DEBUG', 'handlers': ['console'], 'propagate': False},
'pecan': {'level': 'DEBUG', 'handlers': ['console'], 'propagate': False},
'py.warnings': {'handlers': ['console']},
'__force_dict__': True
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'color'
}
},
'formatters': {
'simple': {
'format': ('%(asctime)s %(levelname)-5.5s [%(name)s]'
'[%(threadName)s] %(message)s')
},
'color': {
'()': 'pecan.log.ColorFormatter',
'format': ('%(asctime)s [%(padded_color_levelname)s] [%(name)s]'
'[%(threadName)s] %(message)s'),
'__force_dict__': True
}
}
}
# Custom Configurations must be in Python dictionary format::
#
# foo = {'bar':'baz'}
#
# All configurations are accessible at::
# pecan.conf

View File

@ -0,0 +1,11 @@
Metadata-Version: 1.0
Name: locationservice
Version: 0.1
Summary: locationservice offers a container image for notificationservice
to manage location information
Home-page: UNKNOWN
Author: Bin Yang
Author-email: bin.yang@windriver.com
License: Apache License 2.0
Description: UNKNOWN
Platform: UNKNOWN

View File

@ -0,0 +1,20 @@
MANIFEST.in
setup.cfg
setup.py
apiserver/__init__.py
apiserver/app.py
apiserver.egg-info/PKG-INFO
apiserver.egg-info/SOURCES.txt
apiserver.egg-info/dependency_links.txt
apiserver.egg-info/not-zip-safe
apiserver.egg-info/requires.txt
apiserver.egg-info/top_level.txt
apiserver/controllers/__init__.py
apiserver/controllers/root.py
apiserver/model/__init__.py
apiserver/tests/__init__.py
apiserver/tests/config.py
apiserver/tests/test_functional.py
apiserver/tests/test_units.py
public/css/style.css
public/images/logo.png

View File

@ -0,0 +1,107 @@
import os
import json
import time
import oslo_messaging
from oslo_config import cfg
from locationservicesdk.common.helpers import rpc_helper
from locationservicesdk.model.dto.rpc_endpoint import RpcEndpointInfo
import logging
LOG = logging.getLogger(__name__)
from locationservicesdk.common.helpers import log_helper
log_helper.config_logger(LOG)
class BrokerClientBase(object):
    """Base class wrapping an oslo.messaging transport towards one broker.

    Maintains a registry of RPC listeners keyed by (topic, server) and
    provides call/cast helpers using the broker's endpoint metadata.
    """

    def __init__(self, broker_name, broker_transport_endpoint):
        self.broker_name = broker_name
        # listeners: {topic: {server: context-dict}}
        self.listeners = {}
        self.broker_endpoint = RpcEndpointInfo(broker_transport_endpoint)
        self.transport = rpc_helper.get_transport(self.broker_endpoint)
        LOG.debug("Created Broker client:{0}".format(broker_name))

    def __del__(self):
        self.transport.cleanup()
        del self.transport
        return

    def __create_listener(self, context):
        # Build (but do not start) an RPC server bound to the context's
        # topic/server pair.
        target = oslo_messaging.Target(
            topic=context['topic'],
            server=context['server'])
        endpoints = context['endpoints']
        server = oslo_messaging.get_rpc_server(
            self.transport, target, endpoints, executor=None)
        return server

    def _refresh(self):
        """Reconcile running RPC servers with each context's 'active' flag."""
        for topic, servers in self.listeners.items():
            for servername, context in servers.items():
                try:
                    rpcserver = context.get('rpcserver', None)
                    isactive = context.get('active', False)
                    if isactive and not rpcserver:
                        rpcserver = self.__create_listener(context)
                        rpcserver.start()
                        context['rpcserver'] = rpcserver
                        LOG.debug("Started rpcserver@{0}@{1}".format(
                            context['topic'], context['server']))
                    elif not isactive and rpcserver:
                        rpcserver.stop()
                        rpcserver.wait()
                        context.pop('rpcserver')
                        LOG.debug("Stopped rpcserver@{0}@{1}".format(
                            context['topic'], context['server']))
                except Exception:
                    # bugfix: was a bare 'except:', which also swallowed
                    # SystemExit/KeyboardInterrupt and discarded the failure
                    # reason; log with traceback and continue with the rest.
                    LOG.exception(
                        "Failed to update listener for topic/server:{0}/{1}"
                        .format(topic, servername))
                    continue

    def add_listener(self, topic, server, listener_endpoints=None):
        """Register (or re-activate) a listener and start it."""
        context = self.listeners.get(topic, {}).get(server, {})
        if not context:
            context = {
                'endpoints': listener_endpoints,
                'topic': topic,
                'server': server,
                'active': True
            }
            self.listeners.setdefault(topic, {})[server] = context
        else:
            context['endpoints'] = listener_endpoints
            context['active'] = True
        self._refresh()

    def remove_listener(self, topic, server):
        """Deactivate (and stop) a registered listener, if any."""
        context = self.listeners.get(topic, {}).get(server, {})
        if context:
            context['active'] = False
            self._refresh()

    def is_listening(self, topic, server):
        """Return True when the (topic, server) listener is marked active."""
        context = self.listeners.get(topic, {}).get(server, {})
        return context.get('active', False)

    def any_listener(self):
        """Return True when at least one listener is marked active."""
        for servers in self.listeners.values():
            for context in servers.values():
                if context.get('active', False):
                    return True
        return False

    def call(self, topic, server, api_name, timeout=2, retry=0, **api_kwargs):
        """Invoke api_name on a specific server and return its response.

        The historical hard-coded values (timeout=2, retry=0) are kept as
        defaults, but callers may now override them -- consistent with the
        notificationclientsdk variant of this class.
        """
        target = oslo_messaging.Target(
            topic=topic, server=server, version=self.broker_endpoint.Version,
            namespace=self.broker_endpoint.Namespace)
        queryclient = oslo_messaging.RPCClient(
            self.transport, target, timeout=timeout, retry=retry)
        return queryclient.call({}, api_name, **api_kwargs)

    def cast(self, topic, api_name, timeout=None, retry=None, **api_kwargs):
        """Fan-out api_name to every server listening on topic."""
        target = oslo_messaging.Target(
            topic=topic, fanout=True, version=self.broker_endpoint.Version,
            namespace=self.broker_endpoint.Namespace)
        # timeout/retry default to None, matching RPCClient's own defaults,
        # so existing callers see identical behavior.
        queryclient = oslo_messaging.RPCClient(
            self.transport, target, timeout=timeout, retry=retry)
        queryclient.cast({}, api_name, **api_kwargs)

View File

@ -0,0 +1,84 @@
import os
import json
import time
import oslo_messaging
from oslo_config import cfg
from locationservicesdk.client.base import BrokerClientBase
import logging
LOG = logging.getLogger(__name__)
from locationservicesdk.common.helpers import log_helper
log_helper.config_logger(LOG)
class LocationProducer(BrokerClientBase):
    """Producer side of the location service.

    Answers LocationQuery RPCs and broadcasts location announcements over
    the registration broker.
    """

    class ListenerEndpoint(object):
        """RPC endpoint serving QueryLocation / TriggerAnnouncement."""
        target = oslo_messaging.Target(namespace='notification', version='1.0')

        def __init__(self, location_info, handler=None):
            self.location_info = location_info
            self.handler = handler

        def QueryLocation(self, ctx, **rpc_kwargs):
            LOG.debug("LocationProducer QueryLocation called %s" % rpc_kwargs)
            return self.location_info

        def TriggerAnnouncement(self, ctx, **rpc_kwargs):
            LOG.debug("LocationProducer TriggerAnnouncement called %s" % rpc_kwargs)
            if self.handler:
                return self.handler.handle(**rpc_kwargs)
            else:
                return False

    def __init__(self, node_name, registrationservice_transport_endpoint):
        # unique id used to disambiguate per-instance listeners
        self.Id = id(self)
        self.node_name = node_name
        super(LocationProducer, self).__init__(
            'locationproducer', registrationservice_transport_endpoint)

    def __del__(self):
        super(LocationProducer, self).__del__()

    def announce_location(self, LocationInfo):
        """Broadcast LocationInfo to all location listeners.

        Retries until the cast succeeds.  Bugfix: the retry path now sleeps
        briefly instead of busy-looping while the broker is unreachable; the
        unused local 'server' was removed.
        """
        location_topic_all='LocationListener-*'
        location_topic='LocationListener-{0}'.format(self.node_name)
        while True:
            try:
                self.cast(location_topic_all, 'NotifyLocation',
                          location_info=LocationInfo)
                LOG.debug("Broadcast location info:{0}@Topic:{1}".format(
                    LocationInfo, location_topic))
            except Exception as ex:
                LOG.debug("Failed to publish location due to: {0}".format(str(ex)))
                # back off so a broker outage does not become a hot loop
                time.sleep(2)
                continue
            else:
                break

    def start_location_listener(self, location_info, handler=None):
        """Start answering LocationQuery RPCs for this node."""
        topic = 'LocationQuery'
        server = "LocationService-{0}".format(self.node_name)
        endpoints = [LocationProducer.ListenerEndpoint(location_info, handler)]
        super(LocationProducer, self).add_listener(topic, server, endpoints)
        return True

    def stop_location_listener(self):
        """Stop answering LocationQuery RPCs."""
        topic = 'LocationQuery'
        server = "LocationService-{0}".format(self.node_name)
        super(LocationProducer, self).remove_listener(topic, server)

    def is_listening(self):
        """Return True while the LocationQuery listener is active."""
        topic = 'LocationQuery'
        server = "LocationService-{0}".format(self.node_name)
        return super(LocationProducer, self).is_listening(topic, server)

View File

@ -0,0 +1,12 @@
import logging
def get_logger(module_name):
    """Return the logger for *module_name* with the common config applied."""
    return config_logger(logging.getLogger(module_name))


def config_logger(logger):
    """Apply the project's shared logger configuration.

    Uncomment the setLevel line below to enable verbose debugging.
    """
    # logger.setLevel(level=logging.DEBUG)
    return logger

View File

@ -0,0 +1,22 @@
#coding=utf-8
import os
import json
import oslo_messaging
from oslo_config import cfg
def setup_client(rpc_endpoint_info, topic, server):
    """Return an RPCClient bound to the given endpoint, topic and server."""
    transport = get_transport(rpc_endpoint_info)
    target = oslo_messaging.Target(
        topic=topic,
        version=rpc_endpoint_info.Version,
        server=server,
        namespace=rpc_endpoint_info.Namespace)
    return oslo_messaging.RPCClient(transport, target)


def get_transport(rpc_endpoint_info):
    """Return an RPC transport for the endpoint (sets exchange defaults)."""
    oslo_messaging.set_transport_defaults(rpc_endpoint_info.Exchange)
    return oslo_messaging.get_rpc_transport(
        cfg.CONF, url=rpc_endpoint_info.TransportEndpoint)

View File

@ -0,0 +1,10 @@
#coding=utf-8
from wsme import types as wtypes
from locationservicesdk.model.dto.resourcetype import EnumResourceType
class LocationInfo(wtypes.Base):
    """WSME DTO describing a node's location announcement."""
    # name of the node hosting the notification service
    NodeName = wtypes.text
    # IP of the pod serving that node
    PodIP = wtypes.text
    # seconds since epoch when the announcement was produced
    Timestamp = float
    # resource types available on the node (see EnumResourceType)
    ResourceTypes = [EnumResourceType]

View File

@ -0,0 +1,9 @@
#coding=utf-8
from wsme import types as wtypes
EnumResourceType = wtypes.Enum(str, 'PTP', 'FPGA')
class ResourceType(object):
    """String constants naming the supported resource types."""
    TypePTP = 'PTP'
    TypeFPGA = 'FPGA'

View File

@ -0,0 +1,34 @@
#coding=utf-8
from wsme import types as wtypes
# Default descriptor fields shared by every RPC endpoint.
RPC_ENDPOINT_BASE = {
    'Version': '1.0',
    'Namespace': 'notification',
    'Exchange': 'notification_exchange',
    'TransportEndpoint': '',
    'Topic': '',
    'Server': ''
}


class RpcEndpointInfo(wtypes.Base):
    """WSME DTO describing one RPC endpoint (transport URL plus defaults)."""
    TransportEndpoint = wtypes.text
    Exchange = wtypes.text
    Topic = wtypes.text
    Server = wtypes.text
    Version = wtypes.text
    Namespace = wtypes.text

    def __init__(self, transport_endpoint):
        # Merge the caller's transport URL with the shared defaults, keeping
        # the raw dict around so to_dict() can return it unchanged.
        self.endpoint_json = {
            'Version': RPC_ENDPOINT_BASE['Version'],
            'Namespace': RPC_ENDPOINT_BASE['Namespace'],
            'Exchange': RPC_ENDPOINT_BASE['Exchange'],
            'TransportEndpoint': transport_endpoint,
            'Topic': RPC_ENDPOINT_BASE['Topic'],
            'Server': RPC_ENDPOINT_BASE['Server']
        }
        super(RpcEndpointInfo, self).__init__(**self.endpoint_json)

    def to_dict(self):
        """Return the endpoint as a plain dict (as built in __init__)."""
        return self.endpoint_json

View File

@ -0,0 +1,131 @@
import os
import json
import time
import oslo_messaging
from oslo_config import cfg
import logging
import multiprocessing as mp
from locationservicesdk.common.helpers import rpc_helper
from locationservicesdk.model.dto.rpc_endpoint import RpcEndpointInfo
from locationservicesdk.model.dto.resourcetype import ResourceType
from locationservicesdk.client.locationproducer import LocationProducer
LOG = logging.getLogger(__name__)
from locationservicesdk.common.helpers import log_helper
log_helper.config_logger(LOG)
'''Entry point of Default Process Worker'''
def ProcessWorkerDefault(event, sqlalchemy_conf_json, registration_endpoint, location_info_json):
    """Entry point of the default worker process: run the watcher loop."""
    watcher = LocationWatcherDefault(
        event, sqlalchemy_conf_json, registration_endpoint, location_info_json)
    watcher.run()
class LocationWatcherDefault:
    """Worker that answers location queries and periodically announces this
    node's location, with an announcement interval that doubles while idle
    (capped at one hour) and resets to immediate on a trigger event."""

    class LocationRequestHandlerDefault(object):
        """Wakes the watcher loop when an announcement is triggered remotely."""
        def __init__(self, watcher):
            self.watcher = watcher

        def handle(self, **rpc_kwargs):
            self.watcher.signal_location_event()

    def __init__(self, event, sqlalchemy_conf_json,
                 registration_transport_endpoint, location_info_json):
        self.sqlalchemy_conf = json.loads(sqlalchemy_conf_json)
        self.event = event
        # initial wait between announcements (seconds); doubles when idle
        self.event_timeout = float(2.0)
        self.event_iteration = 0
        self.location_info = json.loads(location_info_json)
        this_node_name = self.location_info['NodeName']
        self.registration_endpoint = RpcEndpointInfo(registration_transport_endpoint)
        self.LocationProducer = LocationProducer(
            this_node_name,
            self.registration_endpoint.TransportEndpoint)

    def signal_location_event(self):
        """Assert the control event to force an immediate announcement."""
        if self.event:
            self.event.set()
        else:
            LOG.warning("Unable to assert location event")

    def run(self):
        """Main loop: announce, then wait for a trigger or the timeout."""
        # start location listener
        self.__start_listener()
        while True:
            # announce the location
            self.__announce_location()
            if self.event.wait(self.event_timeout):
                LOG.debug("daemon control event is asserted")
                self.event.clear()
            else:
                # max timeout: 1 hour
                if self.event_timeout < float(3600):
                    self.event_timeout = self.event_timeout + self.event_timeout
                LOG.debug("daemon control event is timeout")
                continue
        # NOTE: unreachable while the loop above never breaks; kept so the
        # cleanup path exists if a break condition is ever added.
        self.__stop_listener()

    def __start_listener(self):
        """Start the listener answering location queries from clients."""
        LOG.debug("start listener to answer location querying")
        self.LocationProducer.start_location_listener(
            self.location_info,
            LocationWatcherDefault.LocationRequestHandlerDefault(self)
        )

    def __stop_listener(self):
        """Stop the location-query listener."""
        LOG.debug("stop listener to answer location querying")
        # bugfix: stop_location_listener() takes no arguments; it was being
        # called with self.location_info, which raised TypeError.
        self.LocationProducer.stop_location_listener()

    def __announce_location(self):
        """Broadcast this node's location info to clients."""
        LOG.debug("announce location info to clients")
        self.LocationProducer.announce_location(self.location_info)
class DaemonControl(object):
    """Control handle for the location watcher.

    In daemon mode (the default) the watcher runs in a child process spawned
    here, and refresh() wakes it via the shared event; with
    daemon_mode=False refresh() runs one watcher pass inline instead.
    """
    def __init__(
        self, sqlalchemy_conf_json, registration_transport_endpoint,
        location_info, process_worker = None, daemon_mode=True):
        self.daemon_mode = daemon_mode
        # event shared with the worker; setting it forces an announcement
        self.event = mp.Event()
        self.registration_endpoint = RpcEndpointInfo(registration_transport_endpoint)
        self.registration_transport = rpc_helper.get_transport(self.registration_endpoint)
        self.location_info = location_info
        self.sqlalchemy_conf_json = sqlalchemy_conf_json
        if not process_worker:
            process_worker = ProcessWorkerDefault
        self.process_worker = process_worker
        if not self.daemon_mode:
            return
        # spawn the watcher loop in a separate process
        self.mpinstance = mp.Process(
            target=process_worker,
            args=(self.event, self.sqlalchemy_conf_json,
                  self.registration_endpoint.TransportEndpoint,
                  self.location_info))
        self.mpinstance.start()
        pass
    def refresh(self):
        """Trigger a location announcement.

        Runs the worker inline when not in daemon mode, then asserts the
        shared event to wake any running watcher.
        """
        if not self.daemon_mode:
            self.process_worker(
                self.event, self.sqlalchemy_conf_json,
                self.registration_endpoint.TransportEndpoint, self.location_info)
        self.event.set()

View File

@ -0,0 +1,43 @@
body {
background: #311F00;
color: white;
font-family: 'Helvetica Neue', 'Helvetica', 'Verdana', sans-serif;
padding: 1em 2em;
}
a {
color: #FAFF78;
text-decoration: none;
}
a:hover {
text-decoration: underline;
}
div#content {
width: 800px;
margin: 0 auto;
}
form {
margin: 0;
padding: 0;
border: 0;
}
fieldset {
border: 0;
}
input.error {
background: #FAFF78;
}
header {
text-align: center;
}
h1, h2, h3, h4, h5, h6 {
font-family: 'Futura-CondensedExtraBold', 'Futura', 'Helvetica', sans-serif;
text-transform: uppercase;
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 8.9 KiB

View File

@ -0,0 +1,6 @@
[nosetests]
match=^test
where=apiserver
nocapture=1
cover-package=apiserver
cover-erase=1

View File

@ -0,0 +1,22 @@
# -*- coding: utf-8 -*-
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
setup(
name='apiserver',
version='0.1',
description='',
author='',
author_email='',
install_requires=[
"pecan",
],
test_suite='apiserver',
zip_safe=False,
include_package_data=True,
packages=find_packages(exclude=['ez_setup'])
)

View File

@ -0,0 +1,2 @@
BUILDER=docker
LABEL=locationservice-base

View File

@ -0,0 +1,23 @@
ARG BASE
FROM ${BASE}
ARG STX_REPO_FILE=/etc/yum.repos.d/stx.repo
ENV KUBE_LATEST_VERSION="v1.18.3"
RUN set -ex ;\
yum install --disablerepo=* \
$(grep '^name=' ${STX_REPO_FILE} | awk -F '=' '{printf "--enablerepo=" $2 " "}') \
-y \
gcc python3-devel python3-pip \
&& pip3 install --user pecan \
&& pip3 install oslo-config \
&& pip3 install oslo-messaging \
&& pip3 install WSME \
&& pip3 install sqlalchemy
WORKDIR /opt/
COPY ./notificationclient-sidecar /opt/notificationclient
RUN cd /opt/notificationclient && python3 setup.py develop
CMD ["bash"]

View File

@ -0,0 +1 @@
recursive-include public *

View File

@ -0,0 +1,66 @@
import os
SIDECAR_API_PORT = os.environ.get("SIDECAR_API_PORT", "8080")
SIDECAR_API_HOST = os.environ.get("SIDECAR_API_HOST", "127.0.0.1")
# Server Specific Configurations
server = {
'port': SIDECAR_API_PORT,
'host': SIDECAR_API_HOST
}
# Pecan Application Configurations
app = {
'root': 'sidecar.controllers.root.RootController',
'modules': ['sidecar'],
'static_root': '%(confdir)s/public',
'template_path': '%(confdir)s/sidecar/templates',
'debug': True,
'errors': {
404: '/error/404',
'__force_dict__': True
}
}
logging = {
'root': {'level': 'INFO', 'handlers': ['console']},
'loggers': {
'sidecar': {'level': 'DEBUG', 'handlers': ['console'], 'propagate': False},
'pecan': {'level': 'DEBUG', 'handlers': ['console'], 'propagate': False},
'py.warnings': {'handlers': ['console']},
'__force_dict__': True
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'color'
}
},
'formatters': {
'simple': {
'format': ('%(asctime)s %(levelname)-5.5s [%(name)s]'
'[%(threadName)s] %(message)s')
},
'color': {
'()': 'pecan.log.ColorFormatter',
'format': ('%(asctime)s [%(padded_color_levelname)s] [%(name)s]'
'[%(threadName)s] %(message)s'),
'__force_dict__': True
}
}
}
# Bindings and options to pass to SQLAlchemy's ``create_engine``
sqlalchemy = {
'url' : 'sqlite:///sidecar.db',
'echo' : False,
'echo_pool' : False,
'pool_recycle' : 3600,
'encoding' : 'utf-8'
}
# Custom Configurations must be in Python dictionary format::
#
# foo = {'bar':'baz'}
#
# All configurations are accessible at::
# pecan.conf

View File

@ -0,0 +1,109 @@
import os
import json
import time
import oslo_messaging
from oslo_config import cfg
from notificationclientsdk.common.helpers import rpc_helper
from notificationclientsdk.model.dto.rpc_endpoint import RpcEndpointInfo
import logging
LOG = logging.getLogger(__name__)
from notificationclientsdk.common.helpers import log_helper
log_helper.config_logger(LOG)
class BrokerClientBase(object):
    """Base class wrapping an oslo.messaging transport towards one broker.

    Maintains a registry of RPC listeners keyed by (topic, server) and
    provides call/cast helpers using the broker's endpoint metadata.
    """

    def __init__(self, broker_name, broker_transport_endpoint):
        self.broker_name = broker_name
        # listeners: {topic: {server: context-dict}}
        self.listeners = {}
        self.broker_endpoint = RpcEndpointInfo(broker_transport_endpoint)
        self.transport = rpc_helper.get_transport(self.broker_endpoint)
        LOG.debug("Created Broker client:{0}".format(broker_name))

    def __del__(self):
        self.transport.cleanup()
        del self.transport
        return

    def __create_listener(self, context):
        # Build (but do not start) an RPC server bound to the context's
        # topic/server pair.
        target = oslo_messaging.Target(
            topic=context['topic'],
            server=context['server'])
        endpoints = context['endpoints']
        server = oslo_messaging.get_rpc_server(
            self.transport, target, endpoints, executor=None)
        return server

    def _refresh(self):
        """Reconcile running RPC servers with each context's 'active' flag."""
        for topic, servers in self.listeners.items():
            for servername, context in servers.items():
                try:
                    rpcserver = context.get('rpcserver', None)
                    isactive = context.get('active', False)
                    if isactive and not rpcserver:
                        rpcserver = self.__create_listener(context)
                        rpcserver.start()
                        context['rpcserver'] = rpcserver
                        LOG.debug("Started rpcserver@{0}@{1}".format(
                            context['topic'], context['server']))
                    elif not isactive and rpcserver:
                        rpcserver.stop()
                        rpcserver.wait()
                        context.pop('rpcserver')
                        LOG.debug("Stopped rpcserver@{0}@{1}".format(
                            context['topic'], context['server']))
                except Exception:
                    # bugfix: was a bare 'except:', which also swallowed
                    # SystemExit/KeyboardInterrupt and discarded the failure
                    # reason; log with traceback and continue with the rest.
                    LOG.exception(
                        "Failed to update listener for topic/server:{0}/{1}"
                        .format(topic, servername))
                    continue

    def add_listener(self, topic, server, listener_endpoints=None):
        """Register (or re-activate) a listener and start it."""
        context = self.listeners.get(topic, {}).get(server, {})
        if not context:
            context = {
                'endpoints': listener_endpoints,
                'topic': topic,
                'server': server,
                'active': True
            }
            self.listeners.setdefault(topic, {})[server] = context
        else:
            context['endpoints'] = listener_endpoints
            context['active'] = True
        self._refresh()

    def remove_listener(self, topic, server):
        """Deactivate (and stop) a registered listener, if any."""
        context = self.listeners.get(topic, {}).get(server, {})
        if context:
            context['active'] = False
            self._refresh()

    def is_listening(self, topic, server):
        """Return True when the (topic, server) listener is marked active."""
        context = self.listeners.get(topic, {}).get(server, {})
        return context.get('active', False)

    def any_listener(self):
        """Return True when at least one listener is marked active."""
        for servers in self.listeners.values():
            for context in servers.values():
                if context.get('active', False):
                    return True
        return False

    def call(self, topic, server, api_name, timeout=None, retry=None, **api_kwargs):
        """Invoke api_name on a specific server and return its response.

        NOTE: with retry=None the call might get stuck on 'Connection
        failed' and retry forever when the TCP connection is unreachable
        ('AMQP server on <broker host>:<port> is unreachable: timed out').
        """
        target = oslo_messaging.Target(
            topic=topic, server=server, version=self.broker_endpoint.Version,
            namespace=self.broker_endpoint.Namespace)
        queryclient = oslo_messaging.RPCClient(
            self.transport, target, timeout=timeout, retry=retry)
        return queryclient.call({}, api_name, **api_kwargs)

    def cast(self, topic, api_name, timeout=None, retry=None, **api_kwargs):
        """Fan-out api_name to every server listening on topic."""
        target = oslo_messaging.Target(
            topic=topic, fanout=True, version=self.broker_endpoint.Version,
            namespace=self.broker_endpoint.Namespace)
        queryclient = oslo_messaging.RPCClient(
            self.transport, target, timeout=timeout, retry=retry)
        queryclient.cast({}, api_name, **api_kwargs)

View File

@ -0,0 +1,149 @@
import os
import json
import time
import oslo_messaging
from oslo_config import cfg
from notificationclientsdk.common.helpers import hostfile_helper
from notificationclientsdk.client.base import BrokerClientBase
from notificationclientsdk.client.notificationservice import NotificationServiceClient, NotificationHandlerBase
import logging
LOG = logging.getLogger(__name__)
from notificationclientsdk.common.helpers import log_helper
log_helper.config_logger(LOG)
class LocationHandlerBase(object):
    """Interface for consumers of location announcements."""
    def __init__(self):
        # template for per-node notification service hostnames
        self.NOTIFICATIONSERVICE_HOSTNAME = 'notificationservice-{0}'
        pass

    def handle(self, location_info):
        """Process one location announcement; subclasses override."""
        pass


class LocationHandlerDefault(LocationHandlerBase):
    """Default handler: maps the announced node name to its pod IP in the
    hosts file so the sidecar can reach that node's notification service."""

    def __init__(self, host_file_path='/etc/hosts'):
        # NOTE(review): hostfile_helper.update_host hard-codes /etc/hosts,
        # so self.hostfile is currently not honoured -- confirm before
        # relying on a custom path here.
        self.hostfile = host_file_path
        super(LocationHandlerDefault, self).__init__()

    def handle(self, location_info):
        """Update the hosts file from a location announcement.

        Returns True on success, False when NodeName or PodIP is absent.
        """
        LOG.debug("Received location info:{0}".format(location_info))
        nodename = location_info.get('NodeName', None)
        podip = location_info.get("PodIP", None)
        if not nodename or not podip:
            # bugfix: corrected typo "Mising" in the warning message
            LOG.warning("Missing NodeName or PodIP inside location info")
            return False
        hostfile_helper.update_host(
            self.NOTIFICATIONSERVICE_HOSTNAME.format(nodename),
            podip)
        LOG.debug("Updated location with IP:{0}".format(podip))
        return True
class LocationServiceClient(BrokerClientBase):
    """Broker client used by the sidecar to query and track node locations,
    and (via the extensions below) resource-status publications."""

    class ListenerEndpoint(object):
        # RPC endpoint invoked when a location announcement arrives
        target = oslo_messaging.Target(namespace='notification', version='1.0')

        def __init__(self, handler):
            self.handler = handler

        def NotifyLocation(self, ctx, location_info):
            LOG.debug("LocationServiceClient NotifyLocation called %s" % location_info)
            self.handler.handle(location_info)
            # returned timestamp acknowledges receipt to the caller
            return time.time()

    def __init__(self, registrationservice_transport_endpoint):
        # unique id used to build per-instance listener server names
        self.Id = id(self)
        super(LocationServiceClient, self).__init__(
            'locationservice', registrationservice_transport_endpoint)
        return

    def __del__(self):
        super(LocationServiceClient, self).__del__()
        return

    def update_location(self, target_node_name, location_handler=None, timeout=None, retry=None):
        """Query a node's location and feed it to location_handler.

        Returns True when a location was received and handled, else False.
        """
        if not location_handler:
            location_handler = LocationHandlerDefault('/etc/hosts')
        location_info = self.query_location(target_node_name, timeout=timeout, retry=retry)
        if location_info:
            location_handler.handle(location_info)
            return True
        else:
            return False

    def query_location(self, target_node_name, timeout=None, retry=None):
        """RPC call asking the node's LocationService for its location."""
        topic = 'LocationQuery'
        server = 'LocationService-{0}'.format(target_node_name)
        return self.call(topic, server, 'QueryLocation', timeout=timeout, retry=retry)

    def trigger_location_annoucement(self, timeout=None, retry=None):
        """Fan-out request asking every location service to re-announce.

        NOTE(review): the method name carries a historical 'annoucement'
        typo; renaming it would break existing callers.
        """
        topic = 'LocationQuery'
        return self.cast(topic, 'TriggerAnnouncement', timeout=timeout, retry=retry)

    def add_location_listener(self, target_node_name, location_handler=None):
        """Subscribe to location announcements for a node."""
        if not location_handler:
            location_handler = LocationHandlerDefault('/etc/hosts')
        topic='LocationListener-{0}'.format(target_node_name)
        server="LocationListener-{0}".format(self.Id)
        endpoints = [LocationServiceClient.ListenerEndpoint(location_handler)]
        super(LocationServiceClient, self).add_listener(
            topic, server, endpoints)
        return True

    def remove_location_listener(self, target_node_name):
        """Unsubscribe from a node's location announcements."""
        topic='LocationListener-{0}'.format(target_node_name)
        server="LocationListener-{0}".format(self.Id)
        super(LocationServiceClient, self).remove_listener(
            topic, server)

    def is_listening_on_location(self, target_node_name):
        """Return True when subscribed to the node's announcements."""
        topic='LocationListener-{0}'.format(target_node_name)
        server="LocationListener-{0}".format(self.Id)
        return super(LocationServiceClient, self).is_listening(
            topic, server)

    ### extensions
    def trigger_publishing_status(self, resource_type,
        timeout=None, retry=None, resource_qualifier_json=None):
        """Best-effort fan-out asking publishers of resource_type to deliver
        their current status; returns False when the cast fails."""
        topic = '{0}-Status'.format(resource_type)
        try:
            self.cast(
                topic, 'TriggerDelivery', timeout=timeout, retry=retry,
                QualifierJson=resource_qualifier_json)
        except Exception as ex:
            LOG.warning("Fail to trigger_publishing_status: {0}".format(str(ex)))
            return False
        return True

    def add_resource_status_listener(self, resource_type, status_handler=None):
        """Subscribe to resource_type status events from any node ('*')."""
        if not status_handler:
            status_handler = NotificationHandlerBase()
        topic='{0}-Event-*'.format(resource_type)
        server="{0}-EventListener-{1}".format(resource_type, self.Id)
        endpoints = [NotificationServiceClient.ListenerEndpoint(status_handler)]
        super(LocationServiceClient, self).add_listener(
            topic, server, endpoints)
        return True

    def remove_resource_status_listener(self, resource_type):
        """Unsubscribe from resource_type status events."""
        topic='{0}-Event-*'.format(resource_type)
        server="{0}-EventListener-{1}".format(resource_type, self.Id)
        super(LocationServiceClient, self).remove_listener(
            topic, server)
        pass

    def is_listening_on_resource(self, resource_type):
        """Return True when subscribed to resource_type status events."""
        topic='{0}-Event-*'.format(resource_type)
        server="{0}-EventListener-{1}".format(resource_type, self.Id)
        return super(LocationServiceClient, self).is_listening(
            topic, server)

View File

@ -0,0 +1,85 @@
import os
import json
import time
import oslo_messaging
from oslo_config import cfg
from notificationclientsdk.model.dto.rpc_endpoint import RpcEndpointInfo
from notificationclientsdk.client.base import BrokerClientBase
from notificationclientsdk.model.dto.subscription import SubscriptionInfo
from notificationclientsdk.repository.subscription_repo import SubscriptionRepo
import logging
LOG = logging.getLogger(__name__)
from notificationclientsdk.common.helpers import log_helper
log_helper.config_logger(LOG)
class NotificationHandlerBase(object):
    """Interface for notification consumers.

    The base implementation ignores every notification.
    """

    def __init__(self):
        pass

    def handle(self, notification_info):
        # base class handles nothing
        return False
class NotificationServiceClient(BrokerClientBase):
    """Broker client towards one node's notification service."""

    class ListenerEndpoint(object):
        # RPC endpoint invoked when a status notification arrives
        target = oslo_messaging.Target(namespace='notification', version='1.0')

        def __init__(self, handler):
            self.handler = handler

        def NotifyStatus(self, ctx, notification):
            LOG.debug("NotificationServiceClient NotifyStatus called %s" % notification)
            self.handler.handle(notification)
            # returned timestamp acknowledges receipt to the caller
            return time.time()

    '''Init client to notification service'''
    def __init__(self, target_node_name, notificationservice_transport_endpoint):
        # unique id used to build per-instance listener server names
        self.Id = id(self)
        self.target_node_name = target_node_name
        super(NotificationServiceClient, self).__init__(
            '{0}'.format(target_node_name),
            notificationservice_transport_endpoint)
        return

    def __del__(self):
        super(NotificationServiceClient, self).__del__()
        return

    def query_resource_status(self, resource_type,
        timeout=None, retry=None, resource_qualifier_json=None):
        """RPC call querying the tracked node for resource_type's status."""
        topic = '{0}-Status'.format(resource_type)
        server = '{0}-Tracking-{1}'.format(resource_type, self.target_node_name)
        return self.call(
            topic, server, 'QueryStatus', timeout=timeout, retry=retry,
            QualifierJson=resource_qualifier_json)

    def add_resource_status_listener(self, resource_type, status_handler=None):
        """Subscribe to resource_type status events from the tracked node."""
        if not status_handler:
            status_handler = NotificationHandlerBase()
        topic='{0}-Event-{1}'.format(resource_type, self.broker_name)
        server="{0}-EventListener-{1}".format(resource_type, self.Id)
        endpoints = [NotificationServiceClient.ListenerEndpoint(status_handler)]
        super(NotificationServiceClient, self).add_listener(
            topic, server, endpoints)
        return True

    def remove_resource_status_listener(self, resource_type):
        """Unsubscribe from resource_type status events."""
        topic='{0}-Event-{1}'.format(resource_type, self.broker_name)
        server="{0}-EventListener-{1}".format(resource_type, self.Id)
        super(NotificationServiceClient, self).remove_listener(
            topic, server)
        pass

    def is_listening_on_resource(self, resource_type):
        """Return True when subscribed to resource_type status events."""
        topic='{0}-Event-{1}'.format(resource_type, self.broker_name)
        server="{0}-EventListener-{1}".format(resource_type, self.Id)
        return super(NotificationServiceClient, self).is_listening(
            topic, server)

View File

@ -0,0 +1,29 @@
#coding:utf8
import os
import sys
import re
def update_host(hostname, ip, hostsfile="/etc/hosts"):
    """Add or update a hosts-file entry mapping *hostname* to *ip*.

    An existing entry for *hostname* (matched on the second whitespace
    field) is rewritten in place; otherwise "<ip> <hostname>" is appended.

    Fixes over the original: replaced/appended lines are newline-terminated
    (the original wrote them without "\\n", merging the following line);
    blank, comment and single-token lines are passed through untouched
    instead of raising IndexError.

    :param hostname: host name to map
    :param ip: IP address for the mapping
    :param hostsfile: path of the hosts file (parameterized for testing)
    """
    lines = []
    replaced = False
    with open(hostsfile) as fd:
        for line in fd.readlines():
            fields = line.split()
            # keep blank, comment and malformed lines untouched
            if len(fields) < 2 or fields[0].startswith('#'):
                lines.append(line)
            elif fields[1] == hostname:
                # rewrite the entry, keeping the line terminated
                lines.append("{0} {1}\n".format(ip, hostname))
                replaced = True
            else:
                lines.append(line)
    if not replaced:
        # ensure the previous last line is terminated before appending
        if lines and not lines[-1].endswith('\n'):
            lines[-1] += '\n'
        lines.append("{0} {1}\n".format(ip, hostname))
    with open(hostsfile, 'w') as fc:
        fc.writelines(lines)

View File

@ -0,0 +1,12 @@
import logging
def get_logger(module_name):
    """Return a logger named *module_name*, passed through config_logger()."""
    return config_logger(logging.getLogger(module_name))
def config_logger(logger):
    """Apply the project logging policy to *logger* and return it.

    Level is left at the default; uncomment the line below to get
    verbose output while debugging.
    """
    # logger.setLevel(level=logging.DEBUG)
    return logger

View File

@ -0,0 +1,71 @@
import json
from notificationclientsdk.repository.node_repo import NodeRepo
class NodeInfoHelper(object):
    """Static helpers resolving node-name patterns against the node repo.

    Patterns: '.' means the node this client resides on, '*' means all
    broker nodes, anything else is a literal node name.
    """
    BROKER_NODE_ALL = '*'
    # name of the node this process resides on; set once at startup
    residing_node_name = None

    @staticmethod
    def set_residing_node(residing_node_name):
        """Record the residing node name used to expand the '.' pattern."""
        NodeInfoHelper.residing_node_name = residing_node_name

    @staticmethod
    def get_residing_node():
        """Return the residing node name recorded by set_residing_node()."""
        return NodeInfoHelper.residing_node_name

    @staticmethod
    def expand_node_name(node_name_pattern):
        """Resolve '.' to the residing node; '*' and literals pass through."""
        if node_name_pattern == '.':
            return NodeInfoHelper.residing_node_name
        elif node_name_pattern == NodeInfoHelper.BROKER_NODE_ALL:
            return NodeInfoHelper.BROKER_NODE_ALL
        else:
            return node_name_pattern

    @staticmethod
    def default_node_name(node_name_pattern):
        """Resolve '.' and '*' to the residing node; literals pass through."""
        if node_name_pattern == '.' or node_name_pattern == '*':
            return NodeInfoHelper.residing_node_name
        else:
            return node_name_pattern

    @staticmethod
    def match_node_name(node_name_pattern, target_node_name):
        """Return True when *target_node_name* satisfies *node_name_pattern*."""
        if node_name_pattern == '*':
            return True
        elif node_name_pattern == '.':
            return NodeInfoHelper.residing_node_name == target_node_name
        else:
            return node_name_pattern == target_node_name

    @staticmethod
    def enumerate_nodes(node_name_pattern):
        '''
        enumerate nodes from node repo by pattern
        '''
        nodeinfos = []
        if not node_name_pattern:
            raise ValueError("node name pattern is invalid")
        nodeinfo_repo = None
        try:
            nodeinfo_repo = NodeRepo(autocommit=True)
            filter = {}
            if node_name_pattern == '*':
                pass
            elif not node_name_pattern or node_name_pattern == '.':
                filter = { 'NodeName': NodeInfoHelper.residing_node_name }
            else:
                filter = { 'NodeName': node_name_pattern }
            nodeinfos = [x.NodeName for x in nodeinfo_repo.get(Status=1, **filter)]
        except Exception as ex:
            # bug fix: the original referenced an undefined name LOG here
            # (the module never defines one), raising NameError instead of
            # logging; use a locally obtained module logger instead
            import logging
            logging.getLogger(__name__).warning(
                "Failed to enumerate nodes:{0}".format(str(ex)))
            nodeinfos = None
        finally:
            if nodeinfo_repo:
                del nodeinfo_repo
        return nodeinfos

View File

@ -0,0 +1,22 @@
#coding=utf-8
import os
import json
import oslo_messaging
from oslo_config import cfg
def setup_client(rpc_endpoint_info, topic, server):
    """Build an oslo.messaging RPC client bound to *topic*/*server*."""
    oslo_messaging.set_transport_defaults(rpc_endpoint_info.Exchange)
    transport = oslo_messaging.get_rpc_transport(
        cfg.CONF, url=rpc_endpoint_info.TransportEndpoint)
    rpc_target = oslo_messaging.Target(
        topic=topic,
        server=server,
        version=rpc_endpoint_info.Version,
        namespace=rpc_endpoint_info.Namespace)
    return oslo_messaging.RPCClient(transport, rpc_target)
def get_transport(rpc_endpoint_info):
    """Return an RPC transport for the endpoint's exchange and transport URL."""
    oslo_messaging.set_transport_defaults(rpc_endpoint_info.Exchange)
    return oslo_messaging.get_rpc_transport(
        cfg.CONF, url=rpc_endpoint_info.TransportEndpoint)

View File

@ -0,0 +1,49 @@
#coding=utf-8
import os
import json
import requests
import logging
from notificationclientsdk.common.helpers.nodeinfo_helper import NodeInfoHelper
LOG = logging.getLogger(__name__)
from notificationclientsdk.common.helpers import log_helper
log_helper.config_logger(LOG)
def notify(subscriptioninfo, notification, timeout=2, retry=3):
    """POST *notification* as JSON to the subscriber's EndpointUri.

    Connection errors and timeouts are retried up to *retry* times; any
    other request failure is raised immediately.

    Fixes over the original: the HTTPError handler was unreachable
    (HTTPError subclasses RequestException, which was listed first), and
    the dead ``result`` variable / unreachable trailing return are gone.

    :param subscriptioninfo: object with an EndpointUri attribute
    :param notification: JSON-serializable notification payload
    :param timeout: per-request timeout in seconds
    :param retry: number of attempts for transient errors
    :return: the successful requests.Response
    :raises requests.exceptions.RequestException: when delivery fails
    """
    while True:
        retry = retry - 1
        try:
            headers = {'Content-Type': 'application/json'}
            data = json.dumps(notification)
            url = subscriptioninfo.EndpointUri
            response = requests.post(url, data=data, headers=headers, timeout=timeout)
            response.raise_for_status()
            return response
        except requests.exceptions.ConnectionError as errc:
            if retry > 0:
                LOG.warning("Retry notifying due to: {0}".format(str(errc)))
                continue
            raise errc
        except requests.exceptions.Timeout as errt:
            if retry > 0:
                LOG.warning("Retry notifying due to: {0}".format(str(errt)))
                continue
            raise errt
        except requests.exceptions.RequestException as ex:
            # NOTE: HTTPError raised by raise_for_status() is a subclass of
            # RequestException and is treated here as non-retryable
            LOG.warning("Failed to notify due to: {0}".format(str(ex)))
            raise ex
        except Exception as ex:
            LOG.warning("Failed to notify due to: {0}".format(str(ex)))
            raise ex

View File

@ -0,0 +1,16 @@
from notificationclientsdk.model.dto.subscription import SubscriptionInfo
from notificationclientsdk.model.dto.subscription import ResourceQualifierPtp
from wsme.rest.json import tojson
@tojson.when_object(SubscriptionInfo)
def subscriptioninfo_tojson(datatype, value):
    """Serialize a SubscriptionInfo for wsme JSON output."""
    return None if value is None else value.to_dict()
@tojson.when_object(ResourceQualifierPtp)
def resourcequalifierptp_tojson(datatype, value):
    """Serialize a ResourceQualifierPtp for wsme JSON output."""
    return None if value is None else value.to_dict()

View File

@ -0,0 +1,30 @@
#coding=utf-8
import json
from wsme import types as wtypes
from notificationclientsdk.model.dto.resourcetype import EnumResourceType
class LocationInfo(wtypes.Base):
    """wsme view model carrying a node's location announcement."""
    NodeName = wtypes.text
    PodIP = wtypes.text
    Timestamp = float
    ResourceTypes = [EnumResourceType]

    def to_dict(self):
        """Return the plain-dict form of this location info."""
        return {
            'NodeName': self.NodeName,
            'PodIP': self.PodIP,
            'Timestamp': self.Timestamp,
            'ResourceTypes': list(self.ResourceTypes)
        }

    def to_orm(self):
        """Return a dict shaped for the NodeInfo ORM row."""
        if self.ResourceTypes:
            resource_types_json = json.dumps(list(self.ResourceTypes))
        else:
            resource_types_json = ''
        return {
            'NodeName': self.NodeName,
            'PodIP': self.PodIP or '',
            'Timestamp': self.Timestamp,
            'ResourceTypes': resource_types_json
        }

View File

@ -0,0 +1,9 @@
#coding=utf-8
from wsme import types as wtypes
EnumResourceType = wtypes.Enum(str, 'PTP', 'FPGA')
class ResourceType(object):
    """Resource type literals shared by subscriptions and notifications."""
    TypePTP = "PTP"
    TypeFPGA = "FPGA"

View File

@ -0,0 +1,34 @@
#coding=utf-8
from wsme import types as wtypes
RPC_ENDPOINT_BASE = {
'Version': '1.0',
'Namespace': 'notification',
'Exchange': 'notification_exchange',
'TransportEndpoint': '',
'Topic': '',
'Server': ''
}
class RpcEndpointInfo(wtypes.Base):
    """wsme view model describing an oslo.messaging RPC endpoint."""
    TransportEndpoint = wtypes.text
    Exchange = wtypes.text
    Topic = wtypes.text
    Server = wtypes.text
    Version = wtypes.text
    Namespace = wtypes.text

    def __init__(self, transport_endpoint):
        # start from the shared defaults and bind the concrete transport URL
        endpoint = dict(RPC_ENDPOINT_BASE)
        endpoint['TransportEndpoint'] = transport_endpoint
        self.endpoint_json = endpoint
        super(RpcEndpointInfo, self).__init__(**endpoint)

    def to_dict(self):
        """Return the endpoint description as a plain dict."""
        return self.endpoint_json

View File

@ -0,0 +1,94 @@
#coding=utf-8
import os
import json
from wsme import types as wtypes
import datetime
import time
import uuid
from notificationclientsdk.model.dto.resourcetype import EnumResourceType, ResourceType
'''
Base for Resource Qualifiers
'''
class ResourceQualifierBase(wtypes.Base):
    """Common base for resource qualifier view models."""
    def __init__(self, **kw):
        super(ResourceQualifierBase, self).__init__(**kw)

    def to_dict(self):
        # subclasses provide the dict form of their qualifier
        pass
'''
Resource Qualifiers PTP
'''
class ResourceQualifierPtp(ResourceQualifierBase):
    """Qualifier restricting a PTP subscription to one node."""
    NodeName = wtypes.text

    def __init__(self, **kw):
        # consume NodeName here; forward any remaining kwargs to the base
        self.NodeName = kw.pop('NodeName', None)
        super(ResourceQualifierPtp, self).__init__(**kw)

    def to_dict(self):
        """Return the qualifier as a plain dict."""
        return {'NodeName': self.NodeName}
'''
ViewModel of Subscription
'''
class SubscriptionInfo(wtypes.Base):
    """wsme view model of a subscription.

    NOTE(review): the internal attribute is spelled "_ResourceQualifer"
    (sic) throughout. get_resource_qualifier raises AttributeError when
    the qualifier was never assigned — presumably callers always set
    ResourceQualifier or construct from an ORM entry; TODO confirm.
    """
    SubscriptionId = wtypes.text
    UriLocation = wtypes.text
    ResourceType = EnumResourceType
    EndpointUri = wtypes.text

    # dynamic type depending on ResourceType
    def set_resource_qualifier(self, value):
        # accept either a parsed wsme object or the raw qualifier dict/json
        if isinstance(value, wtypes.Base):
            self._ResourceQualifer = value
        else:
            self._ResourceQualifierJson = value
            self._ResourceQualifer = None

    def get_resource_qualifier(self):
        # lazily parse the raw json into the type-specific qualifier object
        if not self._ResourceQualifer:
            if self.ResourceType == ResourceType.TypePTP:
                self._ResourceQualifer = ResourceQualifierPtp(**self._ResourceQualifierJson)
        return self._ResourceQualifer

    ResourceQualifier = wtypes.wsproperty(wtypes.Base,
        get_resource_qualifier, set_resource_qualifier, mandatory=True)

    def __init__(self, orm_entry=None):
        # optionally hydrate this view model from a Subscription ORM row
        if orm_entry:
            self.SubscriptionId = orm_entry.SubscriptionId
            self.ResourceType = orm_entry.ResourceType
            self.UriLocation = orm_entry.UriLocation
            self.ResourceQualifier = json.loads(orm_entry.ResourceQualifierJson)
            self.EndpointUri = orm_entry.EndpointUri

    def to_dict(self):
        """Return the plain-dict form used for JSON serialization."""
        d = {
            'SubscriptionId': self.SubscriptionId,
            'ResourceType': self.ResourceType,
            'UriLocation': self.UriLocation,
            'EndpointUri': self.EndpointUri,
            'ResourceQualifier': self.ResourceQualifier.to_dict()
        }
        return d

    def to_orm(self):
        """Return a dict shaped for the Subscription ORM row."""
        d = {
            'SubscriptionId': self.SubscriptionId,
            'ResourceType': self.ResourceType or '',
            'UriLocation': self.UriLocation,
            'EndpointUri': self.EndpointUri or '',
            'ResourceQualifierJson': json.dumps(self.ResourceQualifier.to_dict()) or ''
        }
        return d

View File

@ -0,0 +1,10 @@
import sqlalchemy
from sqlalchemy import MetaData
from sqlalchemy.ext.declarative import declarative_base
DefaultMetaData = MetaData()
OrmBase = declarative_base(metadata = DefaultMetaData)  # generate the ORM base class

def create_tables(orm_engine):
    """Create all tables registered on OrmBase and return the shared metadata."""
    OrmBase.metadata.create_all(orm_engine)  # create the table schemas
    return OrmBase.metadata

View File

@ -0,0 +1,20 @@
from sqlalchemy import Float, Integer, ForeignKey, String, Column
from notificationclientsdk.model.orm.base import OrmBase
'''
NodeName: literal Node Name
ResourceTypes: json dump of Enumerate string list: PTP, FPGA, etc
'''
class NodeInfo(OrmBase):
    """ORM row describing a broker node and the resources it advertises."""
    __tablename__ = 'nodeinfo'
    # literal node name (primary key)
    NodeName = Column(String(128), primary_key=True)
    PodIP = Column(String(256))
    # json dump of resource type names, e.g. ["PTP", "FPGA"]
    ResourceTypes = Column(String(1024))
    Timestamp = Column(Float)
    # 1 marks an active node — other values presumably mark retired; TODO confirm
    Status = Column(Integer)
    CreateTime = Column(Float)
    LastUpdateTime = Column(Float)
def create_tables(orm_engine):
    """Create the tables registered on the shared OrmBase metadata on *orm_engine*."""
    NodeInfo.metadata.create_all(orm_engine)

View File

@ -0,0 +1,17 @@
from sqlalchemy import Float, Integer, ForeignKey, String, Column
from notificationclientsdk.model.orm.base import OrmBase
class Subscription(OrmBase):
    """ORM row for one notification subscription."""
    __tablename__ = 'subscription'
    SubscriptionId = Column(String(128), primary_key=True)
    UriLocation = Column(String(512))
    ResourceType = Column(String(64))
    EndpointUri = Column(String(512))
    # 1 marks an active subscription; 0 marks entries pending deletion — TODO confirm
    Status = Column(Integer)
    CreateTime = Column(Float)
    LastUpdateTime = Column(Float)
    # NOTE(review): String without a length; some backends (e.g. MySQL)
    # reject length-less VARCHAR — confirm the target database
    ResourceQualifierJson = Column(String)
def create_tables(orm_engine):
    """Create the tables registered on the shared OrmBase metadata on *orm_engine*."""
    Subscription.metadata.create_all(orm_engine)

View File

@ -0,0 +1,75 @@
import logging
from sqlalchemy import create_engine, MetaData
from sqlalchemy.orm import scoped_session, sessionmaker
from notificationclientsdk.model.orm import base
from notificationclientsdk.model.orm import subscription
from notificationclientsdk.model.orm import node
LOG = logging.getLogger(__name__)
from notificationclientsdk.common.helpers import log_helper
log_helper.config_logger(LOG)
class DbContext(object):
    """Process-wide sqlalchemy context shared via static class state.

    Fix over the original: the static declaration was spelled
    ``DBSession`` while all code reads/writes ``DbSession``; the
    declaration is aligned so the attribute exists before init.
    """
    # static properties
    DbSession = None
    metadata = None
    engine = None

    @staticmethod
    def _engine_from_config(configuration):
        """Create an engine from a dict containing a 'url' key plus options."""
        configuration = dict(configuration)
        url = configuration.pop('url')
        return create_engine(url, **configuration)

    @staticmethod
    def init_dbcontext(sqlalchemy_conf):
        """
        This is a stub method which is called at application startup time.

        If you need to bind to a parsed database configuration, set up tables or
        ORM classes, or perform any database initialization, this is the
        recommended place to do it.

        For more information working with databases, and some common recipes,
        see https://pecan.readthedocs.io/en/latest/databases.html
        """
        DbContext.engine = DbContext._engine_from_config(sqlalchemy_conf)
        DbContext.DbSession = sessionmaker(bind=DbContext.engine)
        DbContext.metadata = base.create_tables(DbContext.engine)
        DbContext.metadata.bind = DbContext.engine

    def __init__(self, session=None):
        """Bind to *session*, or create a scoped session from the shared engine."""
        LOG.debug("initing DbContext ...")
        if not session:
            if not DbContext.engine:
                raise Exception("DbContext must be inited with DbContext.init_dbcontext() first")
            session = scoped_session(DbContext.DbSession)
        self.session = session

    def __del__(self):
        LOG.debug("deleting DbContext ...")
        pass

    def get_session(self):
        """Return the underlying sqlalchemy session."""
        return self.session

    def start(self):
        # transaction start hook; sessions open transactions implicitly
        pass

    def start_read_only(self):
        self.start()

    def commit(self):
        self.session.commit()

    def rollback(self):
        self.session.rollback()

    def clear(self):
        # remove() is provided by scoped_session registries
        self.session.remove()

View File

@ -0,0 +1,81 @@
import time, uuid
import logging
from sqlalchemy.orm import scoped_session, sessionmaker
from notificationclientsdk.model.orm.node import NodeInfo as NodeInfoOrm
from notificationclientsdk.repository.dbcontext import DbContext
LOG = logging.getLogger(__name__)
from notificationclientsdk.common.helpers import log_helper
log_helper.config_logger(LOG)
class NodeRepo(DbContext):
    """CRUD repository for NodeInfo rows.

    With autocommit=True each mutation commits itself (or rolls back on
    error). When constructed without a session, the repo owns a scoped
    session and removes it on deletion.
    """
    def __init__(self, session=None, autocommit=False):
        self.autocommit = autocommit
        super(NodeRepo, self).__init__(session)
        if session:
            self.own_session = False
        else:
            self.own_session = True

    def __del__(self):
        # release the scoped session only if this repo created it
        if self.own_session:
            self.clear()

    def add(self, nodeinfo):
        """Insert *nodeinfo*, stamping Status=1 and create/update times."""
        try:
            nodeinfo.Status = 1
            nodeinfo.CreateTime = time.time()
            nodeinfo.LastUpdateTime = nodeinfo.CreateTime
            self.session.add(nodeinfo)
        except Exception as ex:
            if self.autocommit:
                self.rollback()
            raise ex
        else:
            if self.autocommit:
                self.commit()
        return nodeinfo

    def update(self, node_name, **data):
        """Update the row keyed by *node_name*, refreshing LastUpdateTime."""
        try:
            data['LastUpdateTime'] = time.time()
            self.session.query(NodeInfoOrm).filter_by(NodeName=node_name).update(data)
        except Exception as ex:
            if self.autocommit:
                self.rollback()
            raise ex
        else:
            if self.autocommit:
                self.commit()

    def get_one(self, **filter):
        """Return the first row matching *filter*, or None."""
        return self.session.query(NodeInfoOrm).filter_by(**filter).first()

    def get(self, **filter):
        """Return a query over rows matching *filter*."""
        return self.session.query(NodeInfoOrm).filter_by(**filter)

    def delete_one(self, **filter):
        """Delete the first row matching *filter*."""
        try:
            entry = self.session.query(NodeInfoOrm).filter_by(**filter).first()
            self.session.delete(entry)
        except Exception as ex:
            if self.autocommit:
                self.rollback()
            raise ex
        else:
            if self.autocommit:
                self.commit()

    def delete(self, **filter):
        """Delete all rows matching *filter*."""
        try:
            entry = self.session.query(NodeInfoOrm).filter_by(**filter).delete()
        except Exception as ex:
            if self.autocommit:
                self.rollback()
            raise ex
        else:
            if self.autocommit:
                self.commit()

View File

@ -0,0 +1,86 @@
import time, uuid
import logging
from sqlalchemy.orm import scoped_session, sessionmaker
from notificationclientsdk.model.orm.subscription import Subscription as SubscriptionOrm
from notificationclientsdk.repository.dbcontext import DbContext
LOG = logging.getLogger(__name__)
from notificationclientsdk.common.helpers import log_helper
log_helper.config_logger(LOG)
class SubscriptionRepo(DbContext):
    """CRUD repository for Subscription rows.

    With autocommit=True each mutation commits itself (or rolls back on
    error). When constructed without a session, the repo owns a scoped
    session and removes it on deletion.
    """
    def __init__(self, session=None, autocommit=False):
        self.autocommit = autocommit
        super(SubscriptionRepo, self).__init__(session)
        if session:
            self.own_session = False
        else:
            self.own_session = True

    def __del__(self):
        # release the scoped session only if this repo created it
        if self.own_session:
            self.clear()

    def add(self, subscription):
        """Insert *subscription*: assign a fresh uuid id, stamp times and
        append the id to UriLocation."""
        try:
            subscription.SubscriptionId = str(uuid.uuid1())
            subscription.Status = 1
            subscription.CreateTime = time.time()
            subscription.LastUpdateTime = subscription.CreateTime
            subscription.UriLocation = "{0}/{1}".format(
                subscription.UriLocation, subscription.SubscriptionId)
            self.session.add(subscription)
        except Exception as ex:
            if self.autocommit:
                self.rollback()
            raise ex
        else:
            if self.autocommit:
                self.commit()
        return subscription

    def update(self, subscriptionid, **data):
        """Update the row keyed by *subscriptionid*, refreshing LastUpdateTime."""
        try:
            data['LastUpdateTime'] = time.time()
            self.session.query(SubscriptionOrm).filter_by(SubscriptionId=subscriptionid).update(data)
        except Exception as ex:
            if self.autocommit:
                self.rollback()
            raise ex
        else:
            if self.autocommit:
                self.commit()

    def get_one(self, **filter):
        """Return the first row matching *filter*, or None."""
        return self.session.query(SubscriptionOrm).filter_by(**filter).first()

    def get(self, **filter):
        """Return a query over rows matching *filter*."""
        return self.session.query(SubscriptionOrm).filter_by(**filter)

    def delete_one(self, **filter):
        """Delete the first row matching *filter*."""
        try:
            entry = self.session.query(SubscriptionOrm).filter_by(**filter).first()
            self.session.delete(entry)
        except Exception as ex:
            if self.autocommit:
                self.rollback()
            raise ex
        else:
            if self.autocommit:
                self.commit()

    def delete(self, **filter):
        """Delete all rows matching *filter*."""
        try:
            entry = self.session.query(SubscriptionOrm).filter_by(**filter).delete()
        except Exception as ex:
            if self.autocommit:
                self.rollback()
            raise ex
        else:
            if self.autocommit:
                self.commit()

View File

@ -0,0 +1,666 @@
import os
import json
import time
import oslo_messaging
from oslo_config import cfg
import logging
import multiprocessing as mp
import threading
import sys
if sys.version > '3':
import queue as Queue
else:
import Queue
from notificationclientsdk.common.helpers import subscription_helper
from notificationclientsdk.common.helpers import rpc_helper, hostfile_helper
from notificationclientsdk.common.helpers.nodeinfo_helper import NodeInfoHelper
from notificationclientsdk.model.dto.rpc_endpoint import RpcEndpointInfo
from notificationclientsdk.model.dto.subscription import SubscriptionInfo
from notificationclientsdk.model.dto.resourcetype import ResourceType
from notificationclientsdk.model.dto.location import LocationInfo
from notificationclientsdk.repository.dbcontext import DbContext
from notificationclientsdk.repository.subscription_repo import SubscriptionRepo
from notificationclientsdk.model.orm.node import NodeInfo as NodeInfoOrm
from notificationclientsdk.repository.node_repo import NodeRepo
from notificationclientsdk.client.locationservice import LocationServiceClient
from notificationclientsdk.client.notificationservice import NotificationServiceClient
from notificationclientsdk.client.notificationservice import NotificationHandlerBase
from notificationclientsdk.client.locationservice import LocationHandlerDefault
LOG = logging.getLogger(__name__)
from notificationclientsdk.common.helpers import log_helper
log_helper.config_logger(LOG)
'''Entry point of Default Process Worker'''
def ProcessWorkerDefault(event, subscription_event, daemon_context):
    """Construct a NotificationWorker and run its main loop."""
    NotificationWorker(event, subscription_event, daemon_context).run()
class NotificationWorker:
    class NotificationWatcher(NotificationHandlerBase):
        """Adapter forwarding broker notifications to the owning worker."""
        def __init__(self, notification_watcher):
            # notification_watcher: the owning NotificationWorker
            self.notification_watcher = notification_watcher
            super(NotificationWorker.NotificationWatcher, self).__init__()

        def handle(self, notification_info):
            """Relay one notification into the worker's delivery pipeline."""
            LOG.debug("Received notification:{0}".format(notification_info))
            result = self.notification_watcher.handle_notification_delivery(notification_info)
            return result
    class NodeInfoWatcher(LocationHandlerDefault):
        """Adapter forwarding location announcements to the owning worker."""
        def __init__(self, notification_watcher):
            # notification_watcher: the owning NotificationWorker
            self.notification_watcher = notification_watcher
            super(NotificationWorker.NodeInfoWatcher, self).__init__()

        def handle(self, location_info):
            """Queue one location announcement on the worker's location channel."""
            LOG.debug("Received location info:{0}".format(location_info))
            return self.notification_watcher.produce_location_event(location_info)
    def __init__(
        self, event, subscription_event, daemon_context):
        """Set up DB access, RPC clients and the internal event channels.

        :param event: multiprocessing event used as the master wakeup signal
        :param subscription_event: event signalled on subscription changes
        :param daemon_context: dict providing THIS_NODE_NAME,
            SQLALCHEMY_CONF_JSON and REGISTRATION_TRANSPORT_ENDPOINT
        """
        self.daemon_context = daemon_context
        self.residing_node_name = daemon_context['THIS_NODE_NAME']
        NodeInfoHelper.set_residing_node(self.residing_node_name)
        self.sqlalchemy_conf = json.loads(daemon_context['SQLALCHEMY_CONF_JSON'])
        DbContext.init_dbcontext(self.sqlalchemy_conf)
        self.event = event
        self.subscription_event = subscription_event
        self.registration_endpoint = RpcEndpointInfo(daemon_context['REGISTRATION_TRANSPORT_ENDPOINT'])
        self.locationservice_client = LocationServiceClient(self.registration_endpoint.TransportEndpoint)
        # NotificationServiceClient per broker node, keyed by node name
        self.notificationservice_clients = {}
        # watcher callbacks bridging broker events back into this worker
        self.__NotificationWatcher = NotificationWorker.NotificationWatcher(self)
        self.__NodeInfoWatcher = NotificationWorker.NodeInfoWatcher(self)
        self.__init_node_resources_map()
        self.__init_node_info_channel()
        self.__init_location_channel()
        self.__init_notification_channel()
        self.__init_node_sync_channel()
    def __init_node_resources_map(self):
        """Initialise the {node: {resource_type: iteration}} map and its event."""
        self.node_resources_map = {}
        self.node_resources_iteration = 0
        self.__node_resources_event = mp.Event()
    def __init_node_info_channel(self):
        """Create the event signalled when persisted node info changes."""
        self.__node_info_event = mp.Event()
    def __init_location_channel(self):
        """Create the channel carrying location announcements to the main loop."""
        self.location_event = mp.Event()
        self.location_lock = threading.Lock()
        # map index by node name
        # only cache the latest loation info
        self.location_channel = {}
        self.location_keys_q = Queue.Queue()
    def __init_notification_channel(self):
        """Create the lock and per-node/per-subscription delivery stats."""
        self.notification_lock = threading.Lock()
        self.notification_stat = {}
    def __init_node_sync_channel(self):
        """Create the node-sync event, set initially to force a first sync."""
        self.__node_sync_event = mp.Event()
        # initial to be set
        self.__node_sync_event.set()
    def __del__(self):
        # drop the location service client on teardown
        del self.locationservice_client
    def signal_location_event(self):
        """Mark that new location info is queued for consumption."""
        self.location_event.set()
    def signal_subscription_event(self):
        """Mark that subscriptions changed and the resource map must rebuild."""
        self.subscription_event.set()
    def signal_node_sync_event(self):
        """Mark that broker nodes need a status sync-up pass."""
        self.__node_sync_event.set()
    def signal_nodeinfo_event(self):
        """Mark that persisted node info (resource types) changed."""
        self.__node_info_event.set()
    def signal_node_resources_event(self):
        """Mark that the node resources map changed and watchers need refresh."""
        self.__node_resources_event.set()
    def signal_events(self):
        """Wake the main loop to dispatch whichever sub-events are set."""
        self.event.set()
    def produce_location_event(self, location_info):
        """Queue a location announcement for the worker loop.

        Ignores announcements older than the cached one, updates the hosts
        file when the pod IP changed, caches only the latest info per node
        and signals the main loop. Returns True on success.

        :param location_info: dict with NodeName, PodIP and Timestamp keys
        """
        node_name = location_info.get('NodeName', None)
        podip = location_info.get("PodIP", None)
        if not node_name or not podip:
            LOG.warning("Missing PodIP inside location info:{0}".format(location_info))
            return False
        result = True
        timestamp = location_info.get('Timestamp', 0)
        # acquire lock to sync threads which invoke this method
        self.location_lock.acquire()
        try:
            current = self.location_channel.get(node_name, {})
            if current.get('Timestamp', 0) < timestamp:
                if current.get('PodIP', None) != podip:
                    # update /etc/hosts must happen in threads to avoid blocking by the main thread
                    NOTIFICATIONSERVICE_HOSTNAME = 'notificationservice-{0}'
                    hostfile_helper.update_host(
                        NOTIFICATIONSERVICE_HOSTNAME.format(node_name), podip)
                    LOG.debug("Updated location with IP:{0}".format(podip))
                # replace the location_info
                self.location_channel[node_name] = location_info
                self.location_keys_q.put(node_name)
                # notify the consumer to process the update
                self.signal_location_event()
                self.signal_events()
                result = True
        except Exception as ex:
            LOG.warning("failed to produce location event:{0}".format(str(ex)))
            result = False
        finally:
            # release lock
            self.location_lock.release()
        return result
    def consume_location_event(self):
        """Drain queued location announcements into the node repo.

        Adds or updates NodeInfo rows, then raises the follow-up events:
        subscription rebuild on node changes, node-info refresh on
        resource-type changes, and a node sync pass when anything changed.
        """
        LOG.debug("Start consuming location event")
        need_to_sync_node = False
        node_changed = False
        node_resource_updated = False
        nodeinfo_repo = NodeRepo(autocommit=True)
        while not self.location_keys_q.empty():
            node_name = self.location_keys_q.get(False)
            location_info = self.location_channel.get(node_name, None)
            if not location_info:
                LOG.warning("consume location@{0} without location info".format(node_name))
                continue
            LOG.debug("process location event@{0}:{1}".format(node_name, location_info))
            location_info2 = LocationInfo(**location_info)
            entry = nodeinfo_repo.get_one(NodeName=location_info['NodeName'], Status=1)
            if not entry:
                # first announcement from this node: insert a fresh row
                entry = NodeInfoOrm(**location_info2.to_orm())
                nodeinfo_repo.add(entry)
                node_resource_updated = True
                node_changed = True
                LOG.debug("Add NodeInfo: {0}".format(entry.NodeName))
            elif not entry.Timestamp or entry.Timestamp < location_info['Timestamp']:
                # update the entry
                if entry.ResourceTypes != location_info2.ResourceTypes:
                    node_resource_updated = True
                nodeinfo_repo.update(entry.NodeName, **location_info2.to_orm())
                LOG.debug("Update NodeInfo: {0}".format(entry.NodeName))
            else:
                # do nothing
                LOG.debug("Ignore the location for: {0}".format(entry.NodeName))
                continue
            need_to_sync_node = True
            continue
        del nodeinfo_repo
        LOG.debug("Finished consuming location event")
        if need_to_sync_node or node_resource_updated:
            if node_changed:
                LOG.debug("signal node changed event")
                # node changes triggers rebuild map from subscription
                # due to the potential subscriptions to all nodes
                self.signal_subscription_event()
            if node_resource_updated:
                # signal the potential changes on node resources
                LOG.debug("signal node resources updating event")
                self.signal_nodeinfo_event()
            if need_to_sync_node:
                LOG.debug("signal node syncing event")
                self.signal_node_sync_event()
            self.signal_events()
        pass
def handle_notification_delivery(self, notification_info):
LOG.debug("start notification delivery")
result = True
subscription_repo = None
try:
self.notification_lock.acquire()
subscription_repo = SubscriptionRepo(autocommit=True)
resource_type = notification_info.get('ResourceType', None)
node_name = notification_info.get('ResourceQualifier', {}).get('NodeName', None)
if not resource_type:
raise Exception("abnormal notification@{0}".format(node_name))
if resource_type == ResourceType.TypePTP:
pass
else:
raise Exception("notification with unsupported resource type:{0}".format(resource_type))
this_delivery_time = notification_info['EventTimestamp']
entries = subscription_repo.get(ResourceType=resource_type, Status=1)
for entry in entries:
subscriptionid = entry.SubscriptionId
ResourceQualifierJson = entry.ResourceQualifierJson or '{}'
ResourceQualifier = json.loads(ResourceQualifierJson)
# qualify by NodeName
entry_node_name = ResourceQualifier.get('NodeName', None)
node_name_matched = NodeInfoHelper.match_node_name(entry_node_name, node_name)
if not node_name_matched:
continue
subscription_dto2 = SubscriptionInfo(entry)
try:
last_delivery_stat = self.notification_stat.get(node_name,{}).get(subscriptionid,{})
last_delivery_time = last_delivery_stat.get('EventTimestamp', None)
if last_delivery_time and last_delivery_time >= this_delivery_time:
# skip this entry since already delivered
LOG.debug("Ignore the notification for: {0}".format(entry.SubscriptionId))
raise Exception("notification timestamp indicate it is not lastest")
subscription_helper.notify(subscription_dto2, notification_info)
LOG.debug("notification is delivered successfully to {0}".format(
entry.SubscriptionId))
if not self.notification_stat.get(node_name, None):
self.notification_stat[node_name] = {
subscriptionid: {
'EventTimestamp': this_delivery_time
}
}
LOG.debug("delivery time @node: {0},subscription:{1} is added".format(
node_name, subscriptionid))
elif not self.notification_stat[node_name].get(subscriptionid, None):
self.notification_stat[node_name][subscriptionid] = {
'EventTimestamp': this_delivery_time
}
LOG.debug("delivery time @node: {0},subscription:{1} is added".format(
node_name, subscriptionid))
else:
last_delivery_stat['EventTimestamp'] = this_delivery_time
LOG.debug("delivery time @node: {0},subscription:{1} is updated".format(
node_name, subscriptionid))
except Exception as ex:
LOG.warning("notification is not delivered to {0}:{1}".format(
entry.SubscriptionId, str(ex)))
# remove the entry
continue
finally:
pass
except Exception as ex:
LOG.warning("Failed to delivery notification:{0}".format(str(ex)))
result = False
finally:
self.notification_lock.release()
if not subscription_repo:
del subscription_repo
if result:
LOG.debug("Finished notification delivery")
else:
LOG.warning("Failed on notification delivery")
return result
    def process_sync_node_event(self):
        """Sync each broker node; re-arm the sync event for nodes that failed."""
        LOG.debug("Start processing sync node event")
        need_to_sync_node_again = False
        for broker_node_name, node_resources in self.node_resources_map.items():
            try:
                result = self.syncup_node(broker_node_name)
                if not result:
                    need_to_sync_node_again = True
            except Exception as ex:
                LOG.warning("Failed to syncup node{0}:{1}".format(broker_node_name, str(ex)))
                continue
        if need_to_sync_node_again:
            # continue try in to next loop
            self.signal_node_sync_event()
            self.signal_events()
        LOG.debug("Finished processing sync node event")
    def run(self):
        """Main worker loop: wait on the master event, then dispatch each
        sub-event (location, subscription, node info/resources, node sync)
        that is currently set."""
        # start location listener
        self.__start_watch_all_nodes()
        while True:
            self.event.wait()
            self.event.clear()
            LOG.debug("daemon control event is asserted")
            if self.location_event.is_set():
                self.location_event.clear()
                # process location notifications
                self.consume_location_event()
            if self.subscription_event.is_set():
                self.subscription_event.clear()
                # build node resources map from subscriptions
                self.process_subscription_event()
            if self.__node_info_event.is_set():
                self.__node_info_event.clear()
                # update node_resources_map from node info
                self.__update_map_from_nodeinfos()
            if self.__node_resources_event.is_set():
                self.__node_resources_event.clear()
                # update watchers from node_resources_map
                self.__refresh_watchers_from_map()
            if self.__node_sync_event.is_set():
                self.__node_sync_event.clear()
                # compensate for the possible loss of notification during reconnection
                self.process_sync_node_event()
            continue
        return
    def syncup_resource(self, broker_node_name, resource_type):
        """Sync one resource type's status from a broker node.

        For the all-nodes pseudo node, asks the location service to
        re-publish; otherwise queries the node's broker and pushes the
        result through the delivery pipeline. Returns False on messaging
        timeout (so callers retry), re-raises other failures.
        """
        # check to sync up resource status on a node
        LOG.debug("sync up resource@{0} :{1}".format(broker_node_name, resource_type))
        try:
            if broker_node_name == NodeInfoHelper.BROKER_NODE_ALL:
                self.locationservice_client.trigger_publishing_status(
                    resource_type, timeout=5, retry=10)
                return True
            # 1, query resource status
            broker_client = self.notificationservice_clients.get(broker_node_name, None)
            if not broker_client:
                raise Exception("notification service client is not setup for node {0}".format(broker_node_name))
            resource_status = broker_client.query_resource_status(
                resource_type, timeout=5, retry=10)
            # 2, deliver resource by comparing LastDelivery time with EventTimestamp
            # 3, update the LastDelivery with EventTimestamp
            self.__NotificationWatcher.handle(resource_status)
        except oslo_messaging.exceptions.MessagingTimeout as ex:
            LOG.warning("Fail to syncup resource {0}@{1}, due to {2}".format(
                resource_type, broker_node_name, str(ex)))
            return False
        except Exception as ex:
            LOG.warning("Fail to syncup resource {0}@{1}, due to {2}".format(
                resource_type, broker_node_name, str(ex)))
            raise ex
        finally:
            pass
        return True
    def syncup_node(self, broker_node_name):
        """Sync all current-iteration resources on a node; True if all succeeded."""
        all_resource_synced = True
        # check to sync up resources status on a node
        node_resources = self.node_resources_map.get(broker_node_name, None)
        if node_resources:
            LOG.debug("sync up resources@{0} :{1}".format(broker_node_name, node_resources))
            for resource_type, iteration in node_resources.items():
                # only sync resources still requested in the current iteration
                if iteration == self.node_resources_iteration:
                    result = self.syncup_resource(broker_node_name, resource_type)
                    if not result:
                        all_resource_synced = False
        return all_resource_synced
    def __cleanup_map(self):
        """Drop resource entries older than the current iteration, then drop
        nodes left with no resources."""
        for broker_node_name, node_resources in self.node_resources_map.items():
            resourcetypelist = [r for (r, i) in node_resources.items() if i<self.node_resources_iteration]
            for r in resourcetypelist:
                node_resources.pop(r)
            if len(node_resources) == 0:
                # mark empty nodes; removal happens below outside the iteration
                self.node_resources_map[broker_node_name] = None
        nodes = [n for (n, r) in self.node_resources_map.items() if not r]
        for n in nodes:
            self.node_resources_map.pop(n)
        return
'''build map from subscriptions: {node_name:{resource_type:true}'''
def __build_map_from_subscriptions(self):
    """Rebuild self.node_resources_map from subscriptions stored in DB.

    Bumps self.node_resources_iteration first; entries written during
    this pass carry the new iteration number so that stale entries can
    be detected and purged later. Returns True on completion.
    """
    # increase iteration
    self.node_resources_iteration = self.node_resources_iteration+1
    subscription_repo = None
    try:
        subscription_repo = SubscriptionRepo(autocommit=True)
        subs = subscription_repo.get()
        LOG.debug("found {0} subscriptions".format(subs.count()))
        for s in subs:
            subinfo = SubscriptionInfo(s)
            LOG.debug("subscription:{0}, Status:{1}".format(subinfo.to_dict(), s.Status))
            # assume PTP and not wildcast
            resource_type = s.ResourceType
            if resource_type == ResourceType.TypePTP:
                broker_node_name = subinfo.ResourceQualifier.NodeName
            else:
                # ignore the subscription due to unsupported type
                LOG.debug("Ignore the subscription for: {0}".format(subinfo.SubscriptionId))
                continue
            if s.Status == 1:
                # expand a possible wildcard node name to the concrete node
                current_node_name = NodeInfoHelper.expand_node_name(broker_node_name)
                node_map = self.node_resources_map.get(current_node_name, None)
                if not node_map:
                    node_map = {}
                    self.node_resources_map[current_node_name] = node_map
                # stamp with the current iteration to mark the entry live
                node_map[resource_type] = self.node_resources_iteration
        # delete all entry with Status == 0
        subscription_repo.delete(Status=0)
    finally:
        # release the repo (and its DB session) even on failure
        del subscription_repo
    return True
def __update_map_from_nodeinfos(self):
    '''Handle changes of ResourceTypes reported per node.

    Marks as stale any map entry whose resource type is no longer
    advertised by the node (by decreasing its iteration stamp); returns
    True when the map changed and events were signalled.
    '''
    node_resources_map_updated = False
    result = False
    nodeinfo_repo = NodeRepo(autocommit=True)
    LOG.debug("Start node updating event")
    try:
        nodeinfos = nodeinfo_repo.get()
        for nodeinfo in nodeinfos:
            # ResourceTypes is a JSON-encoded list column; may be NULL
            supported_resource_types = json.loads(nodeinfo.ResourceTypes or '[]')
            node_map = self.node_resources_map.get(nodeinfo.NodeName, {})
            for t, v in node_map.items():
                if v == self.node_resources_iteration and not t in supported_resource_types:
                    # remove the resource type request by decrease the iteration
                    node_map[t] = self.node_resources_iteration - 1
                    node_resources_map_updated = True
                    LOG.warning("Detected unavailable resource type: {0}@{1}".format(t, nodeinfo.NodeName))
                else:
                    continue
            pass
    except Exception as ex:
        # best effort: log and fall through with whatever was updated
        LOG.warning("Failed to update map from nodeinfo:{0}".format(str(ex)))
    finally:
        # always release the repo before reporting completion
        del nodeinfo_repo
        LOG.debug("Finished node updating event")
    if node_resources_map_updated:
        self.signal_node_resources_event()
        self.signal_events()
        result = True
    return result
def __start_watch_resource(self, broker_node_name, resource_type):
    """Ensure a notification-service client exists and is watching the
    given resource type on the given node.

    Also arms the node's location watcher first, since resource status
    delivery depends on up-to-date broker location info. Returns True.
    """
    # 1, check and run notificationservice client
    broker_client = self.notificationservice_clients.get(broker_node_name, None)
    if not broker_client:
        broker_client = self.__create_client(broker_node_name)
        self.notificationservice_clients[broker_node_name] = broker_client
    # 2, check and enable resource status watcher
    if not broker_client.is_listening_on_resource(resource_type):
        # must make sure the location is updated/watched:
        # check and start location watcher
        if not self.locationservice_client.is_listening_on_location(broker_node_name):
            # start watching on the location announcement
            self.locationservice_client.add_location_listener(
                broker_node_name,
                location_handler=self.__NodeInfoWatcher)
            LOG.debug("Start watching location announcement of notificationservice@{0}"
                .format(broker_node_name))
            # try to update location by query
            try:
                self.locationservice_client.update_location(
                    broker_node_name, timeout=5, retry=2)
                LOG.debug("Updated location of notificationservice@{0}".format(broker_node_name))
            except Exception as ex:
                # best effort: the location may still arrive later via announcement
                LOG.warning("Failed to update location of node:{0} due to: {1}".format(
                    broker_node_name, str(ex)))
                pass
        broker_client.add_resource_status_listener(
            resource_type, status_handler=self.__NotificationWatcher)
        LOG.debug("Start watching {0}@{1}".format(resource_type, broker_node_name))
    else:
        # check if node_info has been updated, if yes, query the latest resource status
        pass
    return True
def __stop_watch_resource(self, broker_node_name, resource_type):
    """Disable the status watcher for one resource type on a node."""
    client = self.notificationservice_clients.get(broker_node_name, None)
    if not client:
        return True
    if client.is_listening_on_resource(resource_type):
        client.remove_resource_status_listener(resource_type)
        LOG.debug("Stop watching {0}@{1}".format(resource_type, broker_node_name))
    return True
def __refresh_location_watcher(self):
    """Reconcile location watchers with active notification clients.

    Clients that still have active listeners get a (re)armed location
    watcher plus a best-effort location refresh; idle clients have their
    location watcher removed and their map entry cleared.
    """
    # update location watchers
    for broker_node_name, broker_client in self.notificationservice_clients.items():
        if not broker_client:
            continue
        if broker_client.any_listener():
            # check and start location watcher
            if not self.locationservice_client.is_listening_on_location(broker_node_name):
                # start watching on the location announcement
                self.locationservice_client.add_location_listener(
                    broker_node_name,
                    location_handler=self.__NodeInfoWatcher)
                LOG.debug("Start watching location announcement of notificationservice@{0}"
                    .format(broker_node_name))
                # update location by query
                try:
                    self.locationservice_client.update_location(
                        broker_node_name, timeout=5, retry=2)
                    LOG.debug("Updated location of notificationservice@{0}".format(broker_node_name))
                except Exception as ex:
                    # best effort: skip this node, keep refreshing the rest
                    LOG.debug("Failed to Updated location of notificationservice@{0}".format(
                        broker_node_name))
                    continue
            else:
                pass
        elif self.locationservice_client.is_listening_on_location(broker_node_name):
            # 1, stop location listener
            self.locationservice_client.remove_location_listener(broker_node_name)
            LOG.debug("Stop watching location announcement for node@{0}"
                .format(broker_node_name))
            # 2, remove broker client
            # NOTE: setting an existing key to None is safe during iteration
            self.notificationservice_clients[broker_node_name] = None
            del broker_client
            LOG.debug("Stop watching notificationservice@{0}".format(broker_node_name))
        else:
            pass
    return
def process_subscription_event(self):
    """Rebuild the watch map from DB subscriptions and wake the daemon."""
    if not self.__build_map_from_subscriptions():
        return
    # nodeinfo must be re-applied after the map is rebuilt
    self.signal_nodeinfo_event()
    self.signal_node_resources_event()
    self.signal_events()
def __start_watch_all_nodes(self):
    """Arm the wildcard location watcher and ask all notification
    services to announce their locations (best effort, errors logged)."""
    try:
        if not self.locationservice_client.is_listening_on_location(
            NodeInfoHelper.BROKER_NODE_ALL):
            # start watching on the location announcement
            self.locationservice_client.add_location_listener(
                NodeInfoHelper.BROKER_NODE_ALL,
                location_handler=self.__NodeInfoWatcher)
            LOG.debug("Start watching location announcement of notificationservice@{0}"
                .format(NodeInfoHelper.BROKER_NODE_ALL))
        # ask every broker to announce itself so the map can be populated
        self.locationservice_client.trigger_location_annoucement(timeout=20, retry=10)
    except Exception as ex:
        # best effort: failure only logged, daemon keeps running
        LOG.debug("exception: {0}".format(str(ex)))
        pass
    finally:
        pass
    return
def __refresh_watchers_from_map(self):
    """Start/stop per-resource watchers to match the current map, then
    refresh location watchers and purge stale map entries."""
    try:
        LOG.debug("refresh with {0} nodes".format(len(self.node_resources_map)))
        for broker_node_name, node_resources in self.node_resources_map.items():
            LOG.debug("check to watch resources@{0} :{1}".format(broker_node_name, node_resources))
            for resource_type, iteration in node_resources.items():
                # enable watchers
                # only entries stamped with the current iteration are live
                if iteration == self.node_resources_iteration:
                    self.__start_watch_resource(broker_node_name, resource_type)
                else:
                    self.__stop_watch_resource(broker_node_name, resource_type)
        self.__refresh_location_watcher()
        self.__cleanup_map()
    except Exception as ex:
        # best effort: failure only logged, daemon keeps running
        LOG.debug("exception: {0}".format(str(ex)))
        pass
    finally:
        pass
    return
def __create_client(self, broker_node_name):
    """Return a notification-service client bound to the node's broker.

    The wildcard node shares the location service's broker connection.
    """
    if broker_node_name == NodeInfoHelper.BROKER_NODE_ALL:
        # special case: if monitor all node, then use the same broker as locationservice
        return self.locationservice_client
    host = "notificationservice-{0}".format(broker_node_name)
    endpoint = "rabbit://{0}:{1}@{2}:{3}".format(
        self.daemon_context['NOTIFICATION_BROKER_USER'],
        self.daemon_context['NOTIFICATION_BROKER_PASS'],
        host,
        self.daemon_context['NOTIFICATION_BROKER_PORT'])
    return NotificationServiceClient(broker_node_name, endpoint)
class DaemonControl(object):
    """Owner of the daemon worker process and its wakeup events.

    Spawns ``process_worker`` in a separate process and exposes
    :meth:`refresh` to signal it about subscription changes.
    """

    def __init__(self, daemon_context, process_worker = None):
        # daemon_context: dict of broker credentials/endpoints for the worker
        self.daemon_context = daemon_context
        self.residing_node_name = daemon_context['THIS_NODE_NAME']
        # generic wakeup + subscription-change events shared with the worker
        self.event = mp.Event()
        self.subscription_event = mp.Event()
        self.registration_endpoint = RpcEndpointInfo(daemon_context['REGISTRATION_TRANSPORT_ENDPOINT'])
        self.registration_transport = rpc_helper.get_transport(self.registration_endpoint)
        self.locationservice_client = LocationServiceClient(self.registration_endpoint.TransportEndpoint)
        if not process_worker:
            process_worker = ProcessWorkerDefault
        self.mpinstance = mp.Process( target=process_worker, args=(
            self.event, self.subscription_event, daemon_context))
        self.mpinstance.start()
        # initial update
        self.refresh()
        pass

    def refresh(self):
        # wake the worker: re-read subscriptions, then run the main loop
        self.subscription_event.set()
        self.event.set()

View File

@ -0,0 +1,107 @@
import oslo_messaging
import logging
from notificationclientsdk.repository.node_repo import NodeRepo
from notificationclientsdk.repository.subscription_repo import SubscriptionRepo
from notificationclientsdk.model.dto.resourcetype import ResourceType
from notificationclientsdk.model.dto.subscription import SubscriptionInfo
from notificationclientsdk.common.helpers.nodeinfo_helper import NodeInfoHelper
from notificationclientsdk.model.orm.subscription import Subscription as SubscriptionOrm
from notificationclientsdk.client.notificationservice import NotificationServiceClient
from notificationclientsdk.services.daemon import DaemonControl
from notificationclientsdk.common.helpers import subscription_helper
LOG = logging.getLogger(__name__)
from notificationclientsdk.common.helpers import log_helper
log_helper.config_logger(LOG)
class PtpService(object):
    """Facade over the PTP notification workflow.

    Queries live PTP status from a node's notification service and
    manages PTP subscriptions (create with initial delivery, remove).
    """

    def __init__(self, daemon_control):
        self.daemon_control = daemon_control
        self.locationservice_client = daemon_control.locationservice_client
        # manual-commit repo: changes must be committed to become visible
        # to the daemon worker process
        self.subscription_repo = SubscriptionRepo(autocommit=False)

    def __del__(self):
        del self.subscription_repo
        return

    def query(self, broker_node_name):
        """Query current PTP resource status from the node's broker.

        Raises oslo_messaging.exceptions.MessagingTimeout when the
        node's notification service cannot be reached.
        """
        broker_host = "notificationservice-{0}".format(broker_node_name)
        broker_transport_endpoint = "rabbit://{0}:{1}@{2}:{3}".format(
            self.daemon_control.daemon_context['NOTIFICATION_BROKER_USER'],
            self.daemon_control.daemon_context['NOTIFICATION_BROKER_PASS'],
            broker_host,
            self.daemon_control.daemon_context['NOTIFICATION_BROKER_PORT'])
        notificationservice_client = NotificationServiceClient(
            broker_node_name, broker_transport_endpoint)
        try:
            return notificationservice_client.query_resource_status(
                ResourceType.TypePTP, timeout=5, retry=10)
        finally:
            # release the client (and its transport) even on timeout/error
            del notificationservice_client

    def add_subscription(self, subscription_dto):
        """Persist a PTP subscription and deliver the initial status.

        Rolls the new entry back when the node is unreachable or the
        initial delivery to the subscriber endpoint fails. Returns the
        created SubscriptionInfo, or None when initial delivery failed.
        """
        subscription_orm = SubscriptionOrm(**subscription_dto.to_orm())
        broker_node_name = subscription_dto.ResourceQualifier.NodeName
        default_node_name = NodeInfoHelper.default_node_name(broker_node_name)
        nodeinfos = NodeInfoHelper.enumerate_nodes(broker_node_name)
        # 1, check node availability from DB
        if not nodeinfos or not default_node_name in nodeinfos:
            # update nodeinfo
            try:
                nodeinfo = self.locationservice_client.update_location(
                    default_node_name, timeout=5, retry=2)
            except oslo_messaging.exceptions.MessagingTimeout as ex:
                LOG.warning("node {0} cannot be reached due to {1}".format(
                    default_node_name, str(ex)))
                raise ex
        # 2, add to DB
        entry = self.subscription_repo.add(subscription_orm)
        # must commit the transaction to make it visible to daemon worker
        self.subscription_repo.commit()
        # 3, refresh daemon
        self.daemon_control.refresh()
        # initialize so the final return is defined even when
        # default_node_name is empty (was a potential UnboundLocalError)
        subscription_dto2 = None
        # 4, get initial resource status
        if default_node_name:
            ptpstatus = None
            try:
                ptpstatus = self.query(default_node_name)
                LOG.info("initial ptpstatus:{0}".format(ptpstatus))
            except oslo_messaging.exceptions.MessagingTimeout as ex:
                LOG.warning("ptp status is not available @node {0} due to {1}".format(
                    default_node_name, str(ex)))
                # remove the entry
                self.subscription_repo.delete_one(SubscriptionId=entry.SubscriptionId)
                self.subscription_repo.commit()
                self.daemon_control.refresh()
                raise ex
            # 5, initial delivery of ptp status
            subscription_dto2 = SubscriptionInfo(entry)
            try:
                subscription_helper.notify(subscription_dto2, ptpstatus)
                LOG.info("initial ptpstatus is delivered successfully")
            except Exception as ex:
                # bug fix: format string had one placeholder for two arguments
                LOG.warning("initial ptpstatus is not delivered:{0},{1}".format(
                    type(ex), str(ex)))
                # remove the entry
                self.subscription_repo.delete_one(SubscriptionId=entry.SubscriptionId)
                self.subscription_repo.commit()
                self.daemon_control.refresh()
                subscription_dto2 = None
        return subscription_dto2

    def remove_subscription(self, subscriptionid):
        """Delete a subscription by id and wake the daemon worker."""
        try:
            # 1, delete entry
            self.subscription_repo.delete_one(SubscriptionId=subscriptionid)
            self.subscription_repo.commit()
            # 2, refresh daemon
            self.daemon_control.refresh()
        except Exception as ex:
            # bug fix: referenced self.subscriptionid (nonexistent attribute),
            # which raised AttributeError while reporting the real failure
            LOG.warning("subscription {0} is not deleted due to:{1}/{2}".format(
                subscriptionid, type(ex), str(ex)))
            raise ex

View File

@ -0,0 +1,43 @@
/* page background and base typography */
body {
  background: #311F00;
  color: white;
  font-family: 'Helvetica Neue', 'Helvetica', 'Verdana', sans-serif;
  padding: 1em 2em;
}

/* links: yellow, underline only on hover */
a {
  color: #FAFF78;
  text-decoration: none;
}

a:hover {
  text-decoration: underline;
}

/* fixed-width centered content column */
div#content {
  width: 800px;
  margin: 0 auto;
}

/* strip default form chrome */
form {
  margin: 0;
  padding: 0;
  border: 0;
}

fieldset {
  border: 0;
}

/* highlight invalid inputs */
input.error {
  background: #FAFF78;
}

header {
  text-align: center;
}

/* condensed uppercase headings */
h1, h2, h3, h4, h5, h6 {
  font-family: 'Futura-CondensedExtraBold', 'Futura', 'Helvetica', sans-serif;
  text-transform: uppercase;
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 8.9 KiB

View File

@ -0,0 +1,6 @@
[nosetests]
match=^test
where=sidecar
nocapture=1
cover-package=sidecar
cover-erase=1

View File

@ -0,0 +1,22 @@
# -*- coding: utf-8 -*-
# Packaging script for the notification client sidecar package.
try:
    from setuptools import setup, find_packages
except ImportError:
    # bootstrap setuptools on environments that lack it
    from ez_setup import use_setuptools
    use_setuptools()
    from setuptools import setup, find_packages

setup(
    name='sidecar',
    version='0.1',
    description='',
    author='',
    author_email='',
    install_requires=[
        "pecan",
    ],
    test_suite='sidecar',
    zip_safe=False,
    include_package_data=True,
    packages=find_packages(exclude=['ez_setup'])
)

View File

@ -0,0 +1,11 @@
Metadata-Version: 1.0
Name: sidecar
Version: 0.1
Summary: sidecar offers a container image for vdu to
interact with notification service to get updates of resource status
Home-page: UNKNOWN
Author: Bin Yang
Author-email: bin.yang@windriver.com
License: Apache License 2.0
Description: UNKNOWN
Platform: UNKNOWN

View File

@ -0,0 +1,20 @@
MANIFEST.in
setup.cfg
setup.py
sidecar/__init__.py
sidecar/app.py
sidecar.egg-info/PKG-INFO
sidecar.egg-info/SOURCES.txt
sidecar.egg-info/dependency_links.txt
sidecar.egg-info/not-zip-safe
sidecar.egg-info/requires.txt
sidecar.egg-info/top_level.txt
sidecar/controllers/__init__.py
sidecar/controllers/root.py
sidecar/model/__init__.py
sidecar/tests/__init__.py
sidecar/tests/config.py
sidecar/tests/test_functional.py
sidecar/tests/test_units.py
public/css/style.css
public/images/logo.png

View File

@ -0,0 +1,29 @@
from pecan import make_app
from pecan.hooks import TransactionHook
from pecan import conf
from sidecar.repository.dbcontext_default import init_default_dbcontext, defaults
from sidecar.model import jsonify
def setup_app(config):
    """Create the pecan WSGI app with per-request DB transaction hooks."""
    # important to register jsonify for models
    jsonify.__init__()
    default_dbcontext = init_default_dbcontext(conf.sqlalchemy)
    app_conf = dict(config.app)
    root_controller = app_conf.pop('root')
    # bind the shared DB context's lifecycle to the request lifecycle
    db_hook = TransactionHook(
        default_dbcontext.start,
        default_dbcontext.start_read_only,
        default_dbcontext.commit,
        default_dbcontext.rollback,
        default_dbcontext.clear)
    return make_app(
        root_controller,
        logging=getattr(config, 'logging', {}),
        hooks=[db_hook],
        **app_conf)

View File

@ -0,0 +1,65 @@
#coding=utf-8
from pecan import expose, redirect, rest, route, response
from webob.exc import status_map
import os
from wsme import types as wtypes
from wsmeext.pecan import wsexpose
THIS_NODE_NAME = os.environ.get("THIS_NODE_NAME",'controller-0')
from sidecar.controllers.v1.subscriptions import SubscriptionsController
from sidecar.controllers.v1.resource.ptp import PtpController
class HealthController(rest.RestController):
    """Liveness probe endpoint: GET /health."""

    @wsexpose(wtypes.text)
    def get(self):
        # static payload; being reachable means healthy
        return {'health': True}
class V1Controller(rest.RestController):
    """Root of the v1 notification API; sub-paths are attached via route()."""

    @wsexpose(wtypes.text)
    def get(self):
        # identification string for the v1 API root
        return 'v1controller'
class ocloudDaemonController(rest.RestController):
    """Top-level O-Cloud notification API; dispatches to versioned roots."""

    # All supported API versions
    _versions = ['v1']
    # The default API version
    _default_version = 'v1'
    v1 = V1Controller()

    @wsexpose(wtypes.text)
    def get(self):
        # identification string for the service root
        return 'ocloudNotification'
class RootController(object):
    """Pecan application root: index plus generic JSON error rendering."""

    @expose(generic=True, template='json')
    def index(self):
        # empty JSON document at /
        return dict()

    @expose('json')
    def error(self, status):
        """Render an HTTP error as JSON; unknown codes map to 500."""
        try:
            status = int(status)
        except ValueError:  # pragma: no cover
            status = 500
        message = getattr(status_map.get(status), 'explanation', '')
        return dict(status=status, message=message)
# wire URL paths to controllers; both capitalizations of
# "ocloudNotifications" and "PTP" are accepted
route(RootController, 'health', HealthController())
route(RootController, 'ocloudNotifications', ocloudDaemonController())
route(RootController, 'ocloudnotifications', ocloudDaemonController())
route(V1Controller, 'PTP', PtpController())
route(V1Controller, 'ptp', PtpController())
route(V1Controller, 'subscriptions', SubscriptionsController())

View File

@ -0,0 +1,55 @@
#coding=utf-8
from pecan import expose, redirect, rest, route, response, abort
from webob.exc import HTTPException, HTTPNotFound, HTTPBadRequest, HTTPClientError, HTTPServerError
from wsme import types as wtypes
from wsmeext.pecan import wsexpose
import os
import logging
from notificationclientsdk.services.ptp import PtpService
from sidecar.repository.notification_control import notification_control
LOG = logging.getLogger(__name__)
from notificationclientsdk.common.helpers import log_helper
log_helper.config_logger(LOG)
THIS_NODE_NAME = os.environ.get("THIS_NODE_NAME",'controller-0')
class CurrentStateController(rest.RestController):
    """GET .../ptp/CurrentState: live PTP status of this node."""

    def __init__(self):
        pass

    @expose('json')
    def get(self):
        """Return current PTP status; 400 on client error, 500 otherwise."""
        try:
            ptpservice = PtpService(notification_control)
            ptpstatus = ptpservice.query(THIS_NODE_NAME)
            # response.status = 200
            return ptpstatus
        except HTTPException as ex:
            LOG.warning("Client side error:{0},{1}".format(type(ex), str(ex)))
            # raise ex
            abort(400)
        except HTTPServerError as ex:
            LOG.error("Server side error:{0},{1}".format(type(ex), str(ex)))
            # raise ex
            abort(500)
        except Exception as ex:
            # includes MessagingTimeout from an unreachable broker
            LOG.error("Exception:{0}@{1}".format(type(ex),str(ex)))
            abort(500)
class PtpController(rest.RestController):
    """Root of the PTP resource API; CurrentState is attached via route()."""

    def __init__(self):
        pass

    @wsexpose(wtypes.text)
    def get(self):
        # identification string for the PTP resource root
        return 'ptp'
# both capitalizations of the CurrentState sub-path are accepted
route(PtpController, 'CurrentState', CurrentStateController())
route(PtpController, 'currentstate', CurrentStateController())

View File

@ -0,0 +1,157 @@
#coding=utf-8
from pecan import conf
from pecan import expose, redirect, rest, route, response, abort
from webob.exc import HTTPException, HTTPNotFound, HTTPBadRequest, HTTPClientError, HTTPServerError
import os
import logging
import oslo_messaging
from wsme import types as wtypes
from wsmeext.pecan import wsexpose
from notificationclientsdk.model.dto.resourcetype import ResourceType
from notificationclientsdk.model.dto.subscription import SubscriptionInfo
from notificationclientsdk.repository.subscription_repo import SubscriptionRepo
from notificationclientsdk.services.ptp import PtpService
from sidecar.repository.notification_control import notification_control
from sidecar.repository.dbcontext_default import defaults
LOG = logging.getLogger(__name__)
from notificationclientsdk.common.helpers import log_helper
log_helper.config_logger(LOG)
THIS_NODE_NAME = os.environ.get("THIS_NODE_NAME",'controller-0')
class SubscriptionsController(rest.RestController):
    """REST collection endpoint for notification subscriptions."""

    @wsexpose(SubscriptionInfo, body=SubscriptionInfo, status_code=201)
    def post(self, subscription):
        """Create a subscription; only PTP resource types are supported.

        Returns the created subscription (201) or aborts with
        400 (invalid body), 404 (unsupported type / unreachable node),
        or 500 (unexpected failure).
        """
        # decode the request body
        try:
            if subscription.ResourceType == ResourceType.TypePTP:
                LOG.info(' subscribe: {0}, {1} with callback uri {2}'.format(
                    subscription.ResourceType,
                    subscription.ResourceQualifier.NodeName,
                    subscription.EndpointUri))
            else:
                LOG.warning(' Subscribe with unsupported ResourceType:{0}'.format(
                    subscription.ResourceType))
                abort(404)
            if not self._validate(subscription):
                LOG.warning(' Invalid Request data:{0}'.format(subscription.to_dict()))
                abort(400)
            subscription.UriLocation = "{0}://{1}:{2}/ocloudNotifications/v1/subscriptions".format(
                conf.server.get('protocol', 'http'),
                # bug fix: default host was the typo '127.0.01'
                conf.server.get('host', '127.0.0.1'),
                conf.server.get('port', '8080')
            )
            if subscription.ResourceType == ResourceType.TypePTP:
                ptpservice = PtpService(notification_control)
                entry = ptpservice.add_subscription(subscription)
                del ptpservice
                if not entry:
                    # initial delivery to the endpoint failed
                    abort(404)
                subscription.SubscriptionId = entry.SubscriptionId
                subscription.UriLocation = entry.UriLocation
            LOG.info('created subscription: {0}'.format(subscription.to_dict()))
            return subscription
        except oslo_messaging.exceptions.MessagingTimeout as ex:
            # target node's broker is unreachable
            abort(404)
        except HTTPException as ex:
            LOG.warning("Client side error:{0},{1}".format(type(ex), str(ex)))
            raise ex
        except HTTPServerError as ex:
            LOG.error("Server side error:{0},{1}".format(type(ex), str(ex)))
            raise ex
        except Exception as ex:
            LOG.error("Exception:{0}@{1}".format(type(ex), str(ex)))
            abort(500)

    @expose('json')
    def get(self):
        """List all active (Status == 1) subscriptions."""
        try:
            repo = SubscriptionRepo(defaults['dbcontext'].get_session(), autocommit=False)
            entries = repo.get(Status=1)
            response.status = 200
            return [SubscriptionInfo(x).to_dict() for x in entries if x.Status == 1]
        except HTTPException as ex:
            LOG.warning("Client side error:{0},{1}".format(type(ex), str(ex)))
            raise ex
        except HTTPServerError as ex:
            LOG.error("Server side error:{0},{1}".format(type(ex), str(ex)))
            raise ex
        except Exception as ex:
            LOG.error("Exception:{0}@{1}".format(type(ex), str(ex)))
            abort(500)

    @expose()
    def _lookup(self, subscription_id, *remainder):
        # delegate /subscriptions/<id> to the per-entry controller
        return SubscriptionController(subscription_id), remainder

    def _validate(self, subscription_request):
        """Return True when the request has a PTP type and an endpoint URI."""
        try:
            assert subscription_request.ResourceType == 'PTP'
            assert subscription_request.EndpointUri
            return True
        except:
            return False
class SubscriptionController(rest.RestController):
    """Per-entry endpoint for a single subscription (get/delete by id)."""

    def __init__(self, subscription_id):
        # id captured from the URL by SubscriptionsController._lookup
        self.subscription_id = subscription_id

    @expose('json')
    def get(self):
        """Return the active subscription with this id, or 404."""
        try:
            repo = SubscriptionRepo(defaults['dbcontext'].get_session(), autocommit = False)
            entry = repo.get_one(SubscriptionId=self.subscription_id, Status=1)
            if not entry:
                abort(404)
            else:
                response.status = 200
                return SubscriptionInfo(entry).to_dict()
        except HTTPException as ex:
            LOG.warning("Client side error:{0},{1}".format(type(ex), str(ex)))
            raise ex
        except HTTPServerError as ex:
            LOG.error("Server side error:{0},{1}".format(type(ex), str(ex)))
            raise ex
        except Exception as ex:
            LOG.error("Exception:{0}@{1}".format(type(ex),str(ex)))
            abort(500)

    @wsexpose(status_code=204)
    def delete(self):
        """Delete the subscription (204 on success, 404 when missing).

        PTP entries go through PtpService so the daemon worker is
        refreshed; other types are removed directly from the repo.
        """
        try:
            repo = SubscriptionRepo(defaults['dbcontext'].get_session(), autocommit = False)
            entry = repo.get_one(SubscriptionId=self.subscription_id)
            if entry:
                if entry.ResourceType == ResourceType.TypePTP:
                    ptpservice = PtpService(notification_control)
                    ptpservice.remove_subscription(entry.SubscriptionId)
                    del ptpservice
                    return
                else:
                    repo.delete_one(SubscriptionId=self.subscription_id)
                    return
            abort(404)
        except HTTPException as ex:
            LOG.warning("Client side error:{0},{1}".format(type(ex), str(ex)))
            raise ex
        except HTTPServerError as ex:
            LOG.error("Server side error:{0},{1}".format(type(ex), str(ex)))
            raise ex
        except Exception as ex:
            LOG.error("Exception:{0}@{1}".format(type(ex),str(ex)))
            abort(500)

View File

@ -0,0 +1,17 @@
from pecan import conf # noqa
def init_model():
    """Application-startup hook for model/database initialization.

    Invoked once when the app starts. Bind a parsed database
    configuration, set up tables, or perform ORM initialization here.
    For common recipes see
    https://pecan.readthedocs.io/en/latest/databases.html
    """
    pass

View File

@ -0,0 +1,15 @@
from notificationclientsdk.model.dto.subscription import SubscriptionInfo
from notificationclientsdk.model.dto.subscription import ResourceQualifierPtp
from pecan.jsonify import jsonify
@jsonify.register(SubscriptionInfo)
def jsonify_subscriptioninfo(subscriptionInfo):
    # let pecan render SubscriptionInfo DTOs as plain dicts
    return subscriptionInfo.to_dict()
@jsonify.register(ResourceQualifierPtp)
def jsonify_resourcequalifierptp(resourceQualifierPtp):
    # let pecan render ResourceQualifierPtp DTOs as plain dicts
    return resourceQualifierPtp.to_dict()
def __init__():
    # no-op: this module is imported for the side effect of the
    # @jsonify.register calls above; callers invoke jsonify.__init__()
    # to make that dependency explicit
    pass

View File

@ -0,0 +1,12 @@
from notificationclientsdk.repository.dbcontext import DbContext
# process-wide singleton holder; populated by init_default_dbcontext()
defaults = {
    'dbcontext': None
}
def init_default_dbcontext(sqlalchemy_conf):
    """Initialize the shared DbContext and publish it in ``defaults``.

    Returns the created DbContext so callers can also use it directly.
    """
    # NOTE: the original declared ``global defaults``, which is
    # unnecessary — the dict is mutated in place, never rebound
    DbContext.init_dbcontext(sqlalchemy_conf)
    default_dbcontext = DbContext()
    defaults['dbcontext'] = default_dbcontext
    return default_dbcontext

View File

@ -0,0 +1,36 @@
import os
import json
from pecan import conf
from notificationclientsdk.services.daemon import DaemonControl
from notificationclientsdk.common.helpers.nodeinfo_helper import NodeInfoHelper
# registration (shared) broker credentials, injected via environment
REGISTRATION_USER = os.environ.get("REGISTRATION_USER", "admin")
REGISTRATION_PASS = os.environ.get("REGISTRATION_PASS", "admin")
REGISTRATION_PORT = os.environ.get("REGISTRATION_PORT", "5672")
REGISTRATION_HOST = os.environ.get("REGISTRATION_HOST",'registration.notification.svc.cluster.local')

# identity of the node/pod this sidecar resides on
THIS_NODE_NAME = os.environ.get("THIS_NODE_NAME",'controller-0')
THIS_POD_IP = os.environ.get("THIS_POD_IP",'127.0.0.1')

# per-node notification broker credentials
NOTIFICATION_BROKER_USER = os.environ.get("NOTIFICATIONSERVICE_USER", "admin")
NOTIFICATION_BROKER_PASS = os.environ.get("NOTIFICATIONSERVICE_PASS", "admin")
NOTIFICATION_BROKER_PORT = os.environ.get("NOTIFICATIONSERVICE_PORT", "5672")

REGISTRATION_TRANSPORT_ENDPOINT = 'rabbit://{0}:{1}@{2}:{3}'.format(
    REGISTRATION_USER, REGISTRATION_PASS, REGISTRATION_HOST, REGISTRATION_PORT)

# pass sqlalchemy settings (minus any live engine object, which cannot
# cross the process boundary) to the daemon worker as JSON
sqlalchemy_conf = dict(conf.sqlalchemy)
if sqlalchemy_conf.get('engine', None):
    sqlalchemy_conf.pop('engine')
sqlalchemy_conf_json = json.dumps(sqlalchemy_conf)

daemon_context = {
    'SQLALCHEMY_CONF_JSON': sqlalchemy_conf_json,
    'THIS_NODE_NAME': THIS_NODE_NAME,
    'REGISTRATION_TRANSPORT_ENDPOINT': REGISTRATION_TRANSPORT_ENDPOINT,
    'NOTIFICATION_BROKER_USER': NOTIFICATION_BROKER_USER,
    'NOTIFICATION_BROKER_PASS': NOTIFICATION_BROKER_PASS,
    'NOTIFICATION_BROKER_PORT': NOTIFICATION_BROKER_PORT
}

# module-level singleton: importing this module spawns the daemon worker
notification_control = DaemonControl(daemon_context)

NodeInfoHelper.set_residing_node(THIS_NODE_NAME)

View File

@ -0,0 +1,22 @@
import os
from unittest import TestCase
from pecan import set_config
from pecan.testing import load_test_app
__all__ = ['FunctionalTest']
class FunctionalTest(TestCase):
    """
    Used for functional tests where you need to test your
    literal application and its integration with the framework.
    """

    def setUp(self):
        # load a pecan test app from the sibling config.py
        self.app = load_test_app(os.path.join(
            os.path.dirname(__file__),
            'config.py'
        ))

    def tearDown(self):
        # reset global pecan configuration between tests
        set_config({}, overwrite=True)

View File

@ -0,0 +1,25 @@
# Server Specific Configurations
server = {
    # listen on all interfaces, default pecan development port
    'port': '8080',
    'host': '0.0.0.0'
}

# Pecan Application Configurations
app = {
    'root': 'notificationclient.controllers.root.RootController',
    'modules': ['notificationclient'],
    'static_root': '%(confdir)s/../../public',
    'template_path': '%(confdir)s/../templates',
    # NOTE(review): debug is enabled; disable for production deployments
    'debug': True,
    'errors': {
        '404': '/error/404',
        '__force_dict__': True
    }
}
# Custom Configurations must be in Python dictionary format::
#
# foo = {'bar':'baz'}
#
# All configurations are accessible at::
# pecan.conf

View File

@ -0,0 +1,22 @@
from unittest import TestCase
from webtest import TestApp
from notificationclient.tests import FunctionalTest
class TestRootController(FunctionalTest):
    """Functional tests against the application root controller."""

    def test_get(self):
        # root index should render successfully
        response = self.app.get('/')
        assert response.status_int == 200

    def test_search(self):
        # search form posts redirect to the pecan documentation search
        response = self.app.post('/', params={'q': 'RestController'})
        assert response.status_int == 302
        assert response.headers['Location'] == (
            'https://pecan.readthedocs.io/en/latest/search.html'
            '?q=RestController'
        )

    def test_get_not_found(self):
        # unknown paths must return 404 instead of raising
        response = self.app.get('/a/bogus/url', expect_errors=True)
        assert response.status_int == 404

View File

@ -0,0 +1,7 @@
from unittest import TestCase
class TestUnits(TestCase):
    """Minimal unit test proving the test harness executes."""

    def test_units(self):
        # trivially true arithmetic sanity check
        product = 5 * 5
        assert product == 25

View File

@ -0,0 +1,2 @@
BUILDER=docker
LABEL=notificationclient-base

View File

@ -0,0 +1,22 @@
# Base image is supplied by the build system (StarlingX common base)
ARG BASE
FROM ${BASE}

# yum repo definition mounted by the build tooling
ARG STX_REPO_FILE=/etc/yum.repos.d/stx.repo

ENV KUBE_LATEST_VERSION="v1.18.3"

# Install build deps from the STX repos only, then the python runtime deps
RUN set -ex ;\
    yum install --disablerepo=* \
        $(grep '^name=' ${STX_REPO_FILE} | awk -F '=' '{printf "--enablerepo=" $2 " "}') \
        -y \
        gcc python3-devel python3-pip \
    && pip3 install --user pecan \
    && pip3 install oslo-config \
    && pip3 install oslo-messaging \
    && pip3 install WSME

WORKDIR /opt/

# install the tracking function source in development mode
COPY ./ptptrackingfunction /opt/ptptrackingfunction
RUN cd /opt/ptptrackingfunction && python3 setup.py develop

CMD ["bash"]

View File

@ -0,0 +1,6 @@
[nosetests]
match=^test
where=ptptrackingfunction
nocapture=1
cover-package=ptptrackingfunction
cover-erase=1

View File

@ -0,0 +1,22 @@
# -*- coding: utf-8 -*-
# Packaging script for the ptptrackingfunction service.
try:
    from setuptools import setup, find_packages
except ImportError:
    # bootstrap setuptools on environments that lack it
    from ez_setup import use_setuptools
    use_setuptools()
    from setuptools import setup, find_packages

setup(
    name='ptptrackingfunction',
    version='0.1',
    description='',
    author='',
    author_email='',
    # bug fix: install_requires=[""] declared an invalid empty
    # requirement string; an empty list means "no dependencies"
    install_requires=[],
    test_suite='ptptrackingfunction',
    zip_safe=False,
    include_package_data=True,
    packages=find_packages(exclude=['ez_setup'])
)

View File

@ -0,0 +1,107 @@
import os
import json
import time
import oslo_messaging
from oslo_config import cfg
from trackingfunctionsdk.common.helpers import rpc_helper
from trackingfunctionsdk.model.dto.rpc_endpoint import RpcEndpointInfo
import logging
LOG = logging.getLogger(__name__)
from trackingfunctionsdk.common.helpers import log_helper
log_helper.config_logger(LOG)
class BrokerClientBase(object):
    """Base RPC client managing listeners and calls on one message broker.

    Desired listener state is tracked in ``self.listeners`` as
    {topic: {server: context}} and reconciled with running
    oslo.messaging RPC servers by :meth:`_refresh`.
    """

    def __init__(self, broker_name, broker_transport_endpoint):
        self.broker_name = broker_name
        self.listeners = {}
        self.broker_endpoint = RpcEndpointInfo(broker_transport_endpoint)
        self.transport = rpc_helper.get_transport(self.broker_endpoint)
        LOG.debug("Created Broker client:{0}".format(broker_name))

    def __del__(self):
        # release the oslo.messaging transport explicitly
        self.transport.cleanup()
        del self.transport
        return

    def __create_listener(self, context):
        """Build (but do not start) an RPC server for a topic/server context."""
        target = oslo_messaging.Target(
            topic=context['topic'],
            server=context['server'])
        endpoints = context['endpoints']
        server = oslo_messaging.get_rpc_server(
            self.transport, target, endpoints, executor=None)
        return server

    def _refresh(self):
        """Start/stop RPC servers to match each context's 'active' flag."""
        for topic, servers in self.listeners.items():
            for servername, context in servers.items():
                try:
                    rpcserver = context.get('rpcserver', None)
                    isactive = context.get('active', False)
                    if isactive and not rpcserver:
                        rpcserver = self.__create_listener(context)
                        rpcserver.start()
                        context['rpcserver'] = rpcserver
                        LOG.debug("Started rpcserver@{0}@{1}".format(
                            context['topic'], context['server']))
                    elif not isactive and rpcserver:
                        rpcserver.stop()
                        rpcserver.wait()
                        context.pop('rpcserver')
                        LOG.debug("Stopped rpcserver@{0}@{1}".format(
                            context['topic'], context['server']))
                except Exception:
                    # bug fix: was a bare 'except:' which also swallowed
                    # SystemExit/KeyboardInterrupt; narrowed to Exception
                    LOG.error("Failed to update listener for topic/server:{0}/{1}"
                              .format(topic, servername))
                    continue

    def add_listener(self, topic, server, listener_endpoints=None):
        """Register (or re-activate) a listener and reconcile servers."""
        context = self.listeners.get(topic, {}).get(server, {})
        if not context:
            context = {
                'endpoints': listener_endpoints,
                'topic': topic,
                'server': server,
                'active': True
            }
            if not self.listeners.get(topic, None):
                self.listeners[topic] = {}
            self.listeners[topic][server] = context
        else:
            context['endpoints'] = listener_endpoints
            context['active'] = True
        self._refresh()

    def remove_listener(self, topic, server):
        """Deactivate a listener; its RPC server is stopped in _refresh."""
        context = self.listeners.get(topic, {}).get(server, {})
        if context:
            context['active'] = False
        self._refresh()

    def is_listening(self, topic, server):
        """Return True when the topic/server listener is active."""
        context = self.listeners.get(topic, {}).get(server, {})
        return context.get('active', False)

    def any_listener(self):
        """Return True when at least one listener is active."""
        for topic, servers in self.listeners.items():
            for servername, context in servers.items():
                isactive = context.get('active', False)
                if isactive:
                    return True
        return False

    def call(self, topic, server, api_name, timeout=2, retry=0, **api_kwargs):
        """Synchronous RPC call to a specific server on a topic."""
        target = oslo_messaging.Target(
            topic=topic, server=server, version=self.broker_endpoint.Version,
            namespace=self.broker_endpoint.Namespace)
        queryclient = oslo_messaging.RPCClient(
            self.transport, target, timeout=timeout, retry=retry)
        return queryclient.call({}, api_name, **api_kwargs)

    def cast(self, topic, api_name, **api_kwargs):
        """Fire-and-forget fanout cast to all servers on a topic."""
        target = oslo_messaging.Target(
            topic=topic, fanout=True, version=self.broker_endpoint.Version,
            namespace=self.broker_endpoint.Namespace)
        queryclient = oslo_messaging.RPCClient(self.transport, target)
        queryclient.cast({}, api_name, **api_kwargs)

Some files were not shown because too many files have changed in this diff Show More