Move files out of the namespace package
Move the public API out of oslo.messaging to oslo_messaging. Retain the
ability to import from the old namespace package for backwards compatibility
for this release cycle.

bp/drop-namespace-packages

Co-authored-by: Mehdi Abaakouk <mehdi.abaakouk@enovance.com>
Change-Id: Ia562010c152a214f1c0fed767c82022c7c2c52e7
parent 8102f25dd2
commit e55a83e832
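Editor's note (not part of the diff): for consumers of the library, the practical effect of this change is that the package name changes while the old namespace keeps working for one more cycle with a DeprecationWarning. A minimal, illustrative sketch of the migration; the oslo_messaging calls are the real public API, everything else is hypothetical::

    from oslo.config import cfg

    # Old style: namespace package, now deprecated (emits DeprecationWarning)
    from oslo import messaging

    # New style: regular package introduced by this change
    import oslo_messaging

    transport = oslo_messaging.get_transport(cfg.CONF)
    target = oslo_messaging.Target(topic='testtopic', server='server1')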
@@ -2,7 +2,7 @@
AMQP 1.0 Protocol Support
-------------------------

.. currentmodule:: oslo.messaging
.. currentmodule:: oslo_messaging

============
Introduction

@@ -2,7 +2,7 @@
Testing Configurations
----------------------

.. currentmodule:: oslo.messaging.conffixture
.. currentmodule:: oslo_messaging.conffixture

.. autoclass:: ConfFixture
    :members:

@@ -2,7 +2,7 @@
Exceptions
----------

.. currentmodule:: oslo.messaging
.. currentmodule:: oslo_messaging

.. autoexception:: ClientSendError
.. autoexception:: DriverLoadFailure

@@ -2,9 +2,9 @@
Executors
---------

.. automodule:: oslo.messaging._executors
.. automodule:: oslo_messaging._executors

.. currentmodule:: oslo.messaging
.. currentmodule:: oslo_messaging

==============
Executor types

@@ -2,9 +2,9 @@
Notification Listener
---------------------

.. automodule:: oslo.messaging.notify.listener
.. automodule:: oslo_messaging.notify.listener

.. currentmodule:: oslo.messaging
.. currentmodule:: oslo_messaging

.. autofunction:: get_notification_listener

@@ -2,7 +2,7 @@
Notifier
--------

.. currentmodule:: oslo.messaging
.. currentmodule:: oslo_messaging

.. autoclass:: Notifier
    :members:

@@ -2,6 +2,6 @@
Configuration Options
----------------------

.. currentmodule:: oslo.messaging.opts
.. currentmodule:: oslo_messaging.opts

.. autofunction:: list_opts

@@ -2,7 +2,7 @@
RPC Client
----------

.. currentmodule:: oslo.messaging
.. currentmodule:: oslo_messaging

.. autoclass:: RPCClient
    :members:

@@ -2,7 +2,7 @@
Serializer
----------

.. currentmodule:: oslo.messaging
.. currentmodule:: oslo_messaging

.. autoclass:: Serializer
    :members:

@@ -2,9 +2,9 @@
Server
------

.. automodule:: oslo.messaging.rpc.server
.. automodule:: oslo_messaging.rpc.server

.. currentmodule:: oslo.messaging
.. currentmodule:: oslo_messaging

.. autofunction:: get_rpc_server

@@ -2,7 +2,7 @@
Target
------

.. currentmodule:: oslo.messaging
.. currentmodule:: oslo_messaging

.. autoclass:: Target

@@ -2,7 +2,7 @@
Transport
---------

.. currentmodule:: oslo.messaging
.. currentmodule:: oslo_messaging

.. autofunction:: get_transport
@@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.

import warnings

from .exceptions import *
from .localcontext import *
from .notify import *
@@ -21,3 +23,16 @@ from .serializer import *
from .server import *
from .target import *
from .transport import *


def deprecated():
    new_name = __name__.replace('.', '_')
    warnings.warn(
        ('The oslo namespace package is deprecated. Please use %s instead.' %
         new_name),
        DeprecationWarning,
        stacklevel=3,
    )


deprecated()
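Editor's note (not part of the diff): with the shim above, importing the old namespace still works but emits a DeprecationWarning at import time. A minimal sketch of surfacing that warning in a test, assuming oslo.messaging has not already been imported in the process (otherwise the cached module suppresses a second warning)::

    import warnings

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        from oslo import messaging  # old namespace, re-exported from oslo_messaging

    assert any(issubclass(w.category, DeprecationWarning) for w in caught)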
@@ -1,6 +1,3 @@

# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -13,66 +10,4 @@
# License for the specific language governing permissions and limitations
# under the License.

__all__ = ['ConfFixture']

import sys

import fixtures


def _import_opts(conf, module, opts):
    __import__(module)
    conf.register_opts(getattr(sys.modules[module], opts))


class ConfFixture(fixtures.Fixture):

    """Tweak configuration options for unit testing.

    oslo.messaging registers a number of configuration options, but rather than
    directly referencing those options, users of the API should use this
    interface for querying and overriding certain configuration options.

    An example usage::

        self.messaging_conf = self.useFixture(messaging.ConfFixture(cfg.CONF))
        self.messaging_conf.transport_driver = 'fake'

    :param conf: a ConfigOpts instance
    :type conf: oslo.config.cfg.ConfigOpts
    """

    def __init__(self, conf):
        self.conf = conf
        _import_opts(self.conf,
                     'oslo.messaging._drivers.impl_rabbit', 'rabbit_opts')
        _import_opts(self.conf,
                     'oslo.messaging._drivers.impl_qpid', 'qpid_opts')
        _import_opts(self.conf,
                     'oslo.messaging._drivers.amqp', 'amqp_opts')
        _import_opts(self.conf, 'oslo.messaging.rpc.client', '_client_opts')
        _import_opts(self.conf, 'oslo.messaging.transport', '_transport_opts')
        _import_opts(self.conf,
                     'oslo.messaging.notify.notifier', '_notifier_opts')

    def setUp(self):
        super(ConfFixture, self).setUp()
        self.addCleanup(self.conf.reset)

    @property
    def transport_driver(self):
        """The transport driver - for example 'rabbit', 'qpid' or 'fake'."""
        return self.conf.rpc_backend

    @transport_driver.setter
    def transport_driver(self, value):
        self.conf.set_override('rpc_backend', value)

    @property
    def response_timeout(self):
        """Default number of seconds to wait for a response from a call."""
        return self.conf.rpc_response_timeout

    @response_timeout.setter
    def response_timeout(self, value):
        self.conf.set_override('rpc_response_timeout', value)
from oslo_messaging.conffixture import * # noqa
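Editor's note (not part of the diff): the fixture keeps its behaviour after the move; only the import path changes. A short, hypothetical test sketch using the new package name (the test class is illustrative, the oslo_messaging.conffixture module is real)::

    from oslo.config import cfg
    from oslo_messaging import conffixture
    import testtools

    class TestWithFakeTransport(testtools.TestCase):
        def setUp(self):
            super(TestWithFakeTransport, self).setUp()
            # Override messaging options through the fixture instead of
            # touching cfg.CONF directly.
            self.messaging_conf = self.useFixture(
                conffixture.ConfFixture(cfg.CONF))
            self.messaging_conf.transport_driver = 'fake'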
@@ -1,6 +1,3 @@

# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -13,28 +10,4 @@
# License for the specific language governing permissions and limitations
# under the License.

__all__ = ['MessagingException', 'MessagingTimeout', 'MessageDeliveryFailure',
           'InvalidTarget']

import six


class MessagingException(Exception):
    """Base class for exceptions."""


class MessagingTimeout(MessagingException):
    """Raised if message sending times out."""


class MessageDeliveryFailure(MessagingException):
    """Raised if message sending failed after the asked retry."""


class InvalidTarget(MessagingException, ValueError):
    """Raised if a target does not meet certain pre-conditions."""

    def __init__(self, msg, target):
        msg = msg + ":" + six.text_type(target)
        super(InvalidTarget, self).__init__(msg)
        self.target = target
from oslo_messaging.exceptions import * # noqa
@@ -1,6 +1,3 @@

# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -13,43 +10,4 @@
# License for the specific language governing permissions and limitations
# under the License.

__all__ = [
    'get_local_context',
    'set_local_context',
    'clear_local_context',
]

import threading
import uuid

_KEY = '_%s_%s' % (__name__.replace('.', '_'), uuid.uuid4().hex)
_STORE = threading.local()


def get_local_context(ctxt):
    """Retrieve the RPC endpoint request context for the current thread.

    This method allows any code running in the context of a dispatched RPC
    endpoint method to retrieve the context for this request.

    This is commonly used for logging so that, for example, you can include the
    request ID, user and tenant in every message logged from a RPC endpoint
    method.

    :returns: the context for the request dispatched in the current thread
    """
    return getattr(_STORE, _KEY, None)


def set_local_context(ctxt):
    """Set the request context for the current thread.

    :param ctxt: a deserialized request context
    :type ctxt: dict
    """
    setattr(_STORE, _KEY, ctxt)


def clear_local_context():
    """Clear the request context for the current thread."""
    delattr(_STORE, _KEY)
from oslo_messaging.localcontext import * # noqa
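Editor's note (not part of the diff): a minimal, hypothetical sketch of reading the thread-local context set up by the notification and RPC dispatchers later in this diff; the endpoint class and method body are illustrative only::

    from oslo_messaging import localcontext

    class AuditEndpoint(object):
        def info(self, ctxt, publisher_id, event_type, payload, metadata):
            # Returns the context the dispatcher installed for the current
            # thread before invoking this endpoint method.
            current = localcontext.get_local_context(ctxt)
            return current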
@@ -1,7 +1,3 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
# Copyright 2013 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -14,122 +10,4 @@
# License for the specific language governing permissions and limitations
# under the License.

import contextlib
import itertools
import logging
import sys

from oslo.messaging import localcontext
from oslo.messaging import serializer as msg_serializer


LOG = logging.getLogger(__name__)

PRIORITIES = ['audit', 'debug', 'info', 'warn', 'error', 'critical', 'sample']


class NotificationResult(object):
    HANDLED = 'handled'
    REQUEUE = 'requeue'


class NotificationDispatcher(object):
    """A message dispatcher which understands Notification messages.

    A MessageHandlingServer is constructed by passing a callable dispatcher
    which is invoked with context and message dictionaries each time a message
    is received.

    NotifcationDispatcher is one such dispatcher which pass a raw notification
    message to the endpoints
    """

    def __init__(self, targets, endpoints, serializer, allow_requeue,
                 pool=None):
        self.targets = targets
        self.endpoints = endpoints
        self.serializer = serializer or msg_serializer.NoOpSerializer()
        self.allow_requeue = allow_requeue
        self.pool = pool

        self._callbacks_by_priority = {}
        for endpoint, prio in itertools.product(endpoints, PRIORITIES):
            if hasattr(endpoint, prio):
                method = getattr(endpoint, prio)
                self._callbacks_by_priority.setdefault(prio, []).append(method)

        priorities = self._callbacks_by_priority.keys()
        self._targets_priorities = set(itertools.product(self.targets,
                                                         priorities))

    def _listen(self, transport):
        return transport._listen_for_notifications(self._targets_priorities,
                                                    pool=self.pool)

    @contextlib.contextmanager
    def __call__(self, incoming, executor_callback=None):
        result_wrapper = []

        yield lambda: result_wrapper.append(
            self._dispatch_and_handle_error(incoming, executor_callback))

        if result_wrapper[0] == NotificationResult.HANDLED:
            incoming.acknowledge()
        else:
            incoming.requeue()

    def _dispatch_and_handle_error(self, incoming, executor_callback):
        """Dispatch a notification message to the appropriate endpoint method.

        :param incoming: the incoming notification message
        :type ctxt: IncomingMessage
        """
        try:
            return self._dispatch(incoming.ctxt, incoming.message,
                                  executor_callback)
        except Exception:
            # sys.exc_info() is deleted by LOG.exception().
            exc_info = sys.exc_info()
            LOG.error('Exception during message handling',
                      exc_info=exc_info)
            return NotificationResult.HANDLED

    def _dispatch(self, ctxt, message, executor_callback=None):
        """Dispatch an RPC message to the appropriate endpoint method.

        :param ctxt: the request context
        :type ctxt: dict
        :param message: the message payload
        :type message: dict
        """
        ctxt = self.serializer.deserialize_context(ctxt)

        publisher_id = message.get('publisher_id')
        event_type = message.get('event_type')
        metadata = {
            'message_id': message.get('message_id'),
            'timestamp': message.get('timestamp')
        }
        priority = message.get('priority', '').lower()
        if priority not in PRIORITIES:
            LOG.warning('Unknown priority "%s"', priority)
            return

        payload = self.serializer.deserialize_entity(ctxt,
                                                     message.get('payload'))

        for callback in self._callbacks_by_priority.get(priority, []):
            localcontext.set_local_context(ctxt)
            try:
                if executor_callback:
                    ret = executor_callback(callback, ctxt, publisher_id,
                                            event_type, payload, metadata)
                else:
                    ret = callback(ctxt, publisher_id, event_type, payload,
                                   metadata)
                ret = NotificationResult.HANDLED if ret is None else ret
                if self.allow_requeue and ret == NotificationResult.REQUEUE:
                    return ret
            finally:
                localcontext.clear_local_context()
        return NotificationResult.HANDLED
from oslo_messaging.notify.dispatcher import * # noqa
@@ -1,7 +1,3 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
# Copyright 2013 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -13,125 +9,5 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A notification listener exposes a number of endpoints, each of which
contain a set of methods. Each method corresponds to a notification priority.

To create a notification listener, you supply a transport, list of targets and
a list of endpoints.

A transport can be obtained simply by calling the get_transport() method::

    transport = messaging.get_transport(conf)

which will load the appropriate transport driver according to the user's
messaging configuration configuration. See get_transport() for more details.

The target supplied when creating a notification listener expresses the topic
and - optionally - the exchange to listen on. See Target for more details
on these attributes.

Notification listener have start(), stop() and wait() messages to begin
handling requests, stop handling requests and wait for all in-process
requests to complete.

Each notification listener is associated with an executor which integrates the
listener with a specific I/O handling framework. Currently, there are blocking
and eventlet executors available.

A simple example of a notification listener with multiple endpoints might be::

    from oslo.config import cfg
    from oslo import messaging

    class NotificationEndpoint(object):
        def warn(self, ctxt, publisher_id, event_type, payload, metadata):
            do_something(payload)

    class ErrorEndpoint(object):
        def error(self, ctxt, publisher_id, event_type, payload, metadata):
            do_something(payload)

    transport = messaging.get_transport(cfg.CONF)
    targets = [
        messaging.Target(topic='notifications')
        messaging.Target(topic='notifications_bis')
    ]
    endpoints = [
        NotificationEndpoint(),
        ErrorEndpoint(),
    ]
    pool = "listener-workers"
    server = messaging.get_notification_listener(transport, targets, endpoints,
                                                  pool)
    server.start()
    server.wait()

A notifier sends a notification on a topic with a priority, the notification
listener will receive this notification if the topic of this one have been set
in one of the targets and if an endpoint implements the method named like the
priority

Parameters to endpoint methods are the request context supplied by the client,
the publisher_id of the notification message, the event_type, the payload and
metadata. The metadata parameter is a mapping containing a unique message_id
and a timestamp.

By supplying a serializer object, a listener can deserialize a request context
and arguments from - and serialize return values to - primitive types.

By supplying a pool name you can create multiple groups of listeners consuming
notifications and that each group only receives one copy of each
notification.

An endpoint method can explicitly return messaging.NotificationResult.HANDLED
to acknowledge a message or messaging.NotificationResult.REQUEUE to requeue the
message.

The message is acknowledged only if all endpoints either return
messaging.NotificationResult.HANDLED or None.

Note that not all transport drivers implement support for requeueing. In order
to use this feature, applications should assert that the feature is available
by passing allow_requeue=True to get_notification_listener(). If the driver
does not support requeueing, it will raise NotImplementedError at this point.
"""

from oslo.messaging.notify import dispatcher as notify_dispatcher
from oslo.messaging import server as msg_server


def get_notification_listener(transport, targets, endpoints,
                              executor='blocking', serializer=None,
                              allow_requeue=False, pool=None):
    """Construct a notification listener

    The executor parameter controls how incoming messages will be received and
    dispatched. By default, the most simple executor is used - the blocking
    executor.

    If the eventlet executor is used, the threading and time library need to be
    monkeypatched.

    :param transport: the messaging transport
    :type transport: Transport
    :param targets: the exchanges and topics to listen on
    :type targets: list of Target
    :param endpoints: a list of endpoint objects
    :type endpoints: list
    :param executor: name of a message executor - for example
                     'eventlet', 'blocking'
    :type executor: str
    :param serializer: an optional entity serializer
    :type serializer: Serializer
    :param allow_requeue: whether NotificationResult.REQUEUE support is needed
    :type allow_requeue: bool
    :param pool: the pool name
    :type pool: str
    :raises: NotImplementedError
    """
    transport._require_driver_features(requeue=allow_requeue)
    dispatcher = notify_dispatcher.NotificationDispatcher(targets, endpoints,
                                                          serializer,
                                                          allow_requeue, pool)
    return msg_server.MessageHandlingServer(transport, dispatcher, executor)
from oslo_messaging.notify.listener import * # noqa
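Editor's note (not part of the diff): the module docstring above still shows its example with the old namespace imports. Under the new layout the same listener would be built roughly like this (illustrative sketch; the endpoint classes are the ones defined in the docstring example)::

    from oslo.config import cfg
    import oslo_messaging

    transport = oslo_messaging.get_transport(cfg.CONF)
    targets = [oslo_messaging.Target(topic='notifications')]
    endpoints = [NotificationEndpoint(), ErrorEndpoint()]
    server = oslo_messaging.get_notification_listener(transport, targets,
                                                      endpoints,
                                                      pool='listener-workers')
    server.start()
    server.wait()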
@@ -1,41 +1,13 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging

from oslo.config import cfg


class LoggingErrorNotificationHandler(logging.Handler):
    def __init__(self, *args, **kwargs):
        # NOTE(dhellmann): Avoid a cyclical import by doing this one
        # at runtime.
        from oslo import messaging
        logging.Handler.__init__(self, *args, **kwargs)
        self._transport = messaging.get_transport(cfg.CONF)
        self._notifier = messaging.Notifier(self._transport,
                                            publisher_id='error.publisher')

    def emit(self, record):
        # NOTE(bnemec): Notifier registers this opt with the transport.
        if ('log' in self._transport.conf.notification_driver):
            # NOTE(lbragstad): If we detect that log is one of the
            # notification drivers, then return. This protects from infinite
            # recursion where something bad happens, it gets logged, the log
            # handler sends a notification, and the log_notifier sees the
            # notification and logs it.
            return
        self._notifier.error(None, 'error_notification',
                             dict(error=record.msg))


PublishErrorsHandler = LoggingErrorNotificationHandler
from oslo_messaging.notify.log_handler import * # noqa
@@ -1,5 +1,3 @@
# Copyright 2013 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -11,71 +9,5 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Driver for the Python logging package that sends log records as a notification.
"""
import logging

from oslo.config import cfg
from oslo.messaging.notify import notifier
from oslo.messaging import transport


class LoggingNotificationHandler(logging.Handler):
    """Handler for logging to the messaging notification system.

    Each time the application logs a message using the :py:mod:`logging`
    module, it will be sent as a notification. The severity used for the
    notification will be the same as the one used for the log record.

    This can be used into a Python logging configuration this way::

      [handler_notifier]
      class=oslo.messaging.LoggingNotificationHandler
      level=ERROR
      args=('qpid:///')

    """

    CONF = cfg.CONF
    """Default configuration object used, subclass this class if you want to
    use another one.

    """

    def __init__(self, url, publisher_id=None, driver=None,
                 topic=None, serializer=None):
        self.notifier = notifier.Notifier(
            transport.get_transport(self.CONF, url),
            publisher_id, driver,
            topic,
            serializer() if serializer else None)
        logging.Handler.__init__(self)

    def emit(self, record):
        """Emit the log record to the messaging notification system.

        :param record: A log record to emit.

        """
        method = getattr(self.notifier, record.levelname.lower(), None)

        if not method:
            return

        method(None,
               'logrecord',
               {
                   'name': record.name,
                   'levelno': record.levelno,
                   'levelname': record.levelname,
                   'exc_info': record.exc_info,
                   'pathname': record.pathname,
                   'lineno': record.lineno,
                   'msg': record.getMessage(),
                   'funcName': record.funcName,
                   'thread': record.thread,
                   'processName': record.processName,
                   'process': record.process,
                   'extra': getattr(record, 'extra', None),
               })
from oslo_messaging.notify.logger import * # noqa
@@ -1,5 +1,3 @@
# Copyright (c) 2013-2014 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -12,117 +10,4 @@
# License for the specific language governing permissions and limitations
# under the License.

"""
Send notifications on request

"""
import logging
import os.path
import sys
import traceback as tb

import six
import webob.dec

from oslo.config import cfg
from oslo import messaging
from oslo.messaging._i18n import _LE
from oslo.messaging import notify
from oslo.messaging.openstack.common import context
from oslo.middleware import base

LOG = logging.getLogger(__name__)


def log_and_ignore_error(fn):
    def wrapped(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except Exception as e:
            LOG.exception(_LE('An exception occurred processing '
                              'the API call: %s ') % e)
    return wrapped


class RequestNotifier(base.Middleware):
    """Send notification on request."""

    @classmethod
    def factory(cls, global_conf, **local_conf):
        """Factory method for paste.deploy."""
        conf = global_conf.copy()
        conf.update(local_conf)

        def _factory(app):
            return cls(app, **conf)
        return _factory

    def __init__(self, app, **conf):
        self.notifier = notify.Notifier(
            messaging.get_transport(cfg.CONF, conf.get('url')),
            publisher_id=conf.get('publisher_id',
                                  os.path.basename(sys.argv[0])))
        self.service_name = conf.get('service_name')
        self.ignore_req_list = [x.upper().strip() for x in
                                conf.get('ignore_req_list', '').split(',')]
        super(RequestNotifier, self).__init__(app)

    @staticmethod
    def environ_to_dict(environ):
        """Following PEP 333, server variables are lower case, so don't
        include them.

        """
        return dict((k, v) for k, v in six.iteritems(environ)
                    if k.isupper() and k != 'HTTP_X_AUTH_TOKEN')

    @log_and_ignore_error
    def process_request(self, request):
        request.environ['HTTP_X_SERVICE_NAME'] = \
            self.service_name or request.host
        payload = {
            'request': self.environ_to_dict(request.environ),
        }

        self.notifier.info(context.get_admin_context(),
                           'http.request',
                           payload)

    @log_and_ignore_error
    def process_response(self, request, response,
                         exception=None, traceback=None):
        payload = {
            'request': self.environ_to_dict(request.environ),
        }

        if response:
            payload['response'] = {
                'status': response.status,
                'headers': response.headers,
            }

        if exception:
            payload['exception'] = {
                'value': repr(exception),
                'traceback': tb.format_tb(traceback)
            }

        self.notifier.info(context.get_admin_context(),
                           'http.response',
                           payload)

    @webob.dec.wsgify
    def __call__(self, req):
        if req.method in self.ignore_req_list:
            return req.get_response(self.application)
        else:
            self.process_request(req)
            try:
                response = req.get_response(self.application)
            except Exception:
                exc_type, value, traceback = sys.exc_info()
                self.process_response(req, None, value, traceback)
                raise
            else:
                self.process_response(req, response)
            return response
from oslo_messaging.notify.middleware import * # noqa
@@ -1,8 +1,3 @@

# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -15,301 +10,4 @@
# License for the specific language governing permissions and limitations
# under the License.

import abc
import logging
import uuid

import six
from stevedore import named

from oslo.config import cfg
from oslo.messaging import serializer as msg_serializer
from oslo.utils import timeutils

_notifier_opts = [
    cfg.MultiStrOpt('notification_driver',
                    default=[],
                    help='Driver or drivers to handle sending notifications.'),
    cfg.ListOpt('notification_topics',
                default=['notifications', ],
                deprecated_name='topics',
                deprecated_group='rpc_notifier2',
                help='AMQP topic used for OpenStack notifications.'),
]

_LOG = logging.getLogger(__name__)


@six.add_metaclass(abc.ABCMeta)
class _Driver(object):

    def __init__(self, conf, topics, transport):
        self.conf = conf
        self.topics = topics
        self.transport = transport

    @abc.abstractmethod
    def notify(self, ctxt, msg, priority, retry):
        pass


class Notifier(object):

    """Send notification messages.

    The Notifier class is used for sending notification messages over a
    messaging transport or other means.

    Notification messages follow the following format::

        {'message_id': six.text_type(uuid.uuid4()),
         'publisher_id': 'compute.host1',
         'timestamp': timeutils.utcnow(),
         'priority': 'WARN',
         'event_type': 'compute.create_instance',
         'payload': {'instance_id': 12, ... }}

    A Notifier object can be instantiated with a transport object and a
    publisher ID:

        notifier = messaging.Notifier(get_transport(CONF), 'compute')

    and notifications are sent via drivers chosen with the notification_driver
    config option and on the topics chosen with the notification_topics config
    option.

    Alternatively, a Notifier object can be instantiated with a specific
    driver or topic::

        notifier = notifier.Notifier(RPC_TRANSPORT,
                                     'compute.host',
                                     driver='messaging',
                                     topic='notifications')

    Notifier objects are relatively expensive to instantiate (mostly the cost
    of loading notification drivers), so it is possible to specialize a given
    Notifier object with a different publisher id using the prepare() method::

        notifier = notifier.prepare(publisher_id='compute')
        notifier.info(ctxt, event_type, payload)
    """

    def __init__(self, transport, publisher_id=None,
                 driver=None, topic=None,
                 serializer=None, retry=None):
        """Construct a Notifier object.

        :param transport: the transport to use for sending messages
        :type transport: oslo.messaging.Transport
        :param publisher_id: field in notifications sent, for example
                             'compute.host1'
        :type publisher_id: str
        :param driver: a driver to lookup from oslo.messaging.notify.drivers
        :type driver: str
        :param topic: the topic which to send messages on
        :type topic: str
        :param serializer: an optional entity serializer
        :type serializer: Serializer
        :param retry: an connection retries configuration
                      None or -1 means to retry forever
                      0 means no retry
                      N means N retries
        :type retry: int
        """
        transport.conf.register_opts(_notifier_opts)

        self.transport = transport
        self.publisher_id = publisher_id
        self.retry = retry

        self._driver_names = ([driver] if driver is not None
                              else transport.conf.notification_driver)

        self._topics = ([topic] if topic is not None
                        else transport.conf.notification_topics)
        self._serializer = serializer or msg_serializer.NoOpSerializer()

        self._driver_mgr = named.NamedExtensionManager(
            'oslo.messaging.notify.drivers',
            names=self._driver_names,
            invoke_on_load=True,
            invoke_args=[transport.conf],
            invoke_kwds={
                'topics': self._topics,
                'transport': self.transport,
            }
        )

    _marker = object()

    def prepare(self, publisher_id=_marker, retry=_marker):
        """Return a specialized Notifier instance.

        Returns a new Notifier instance with the supplied publisher_id. Allows
        sending notifications from multiple publisher_ids without the overhead
        of notification driver loading.

        :param publisher_id: field in notifications sent, for example
                             'compute.host1'
        :type publisher_id: str
        :param retry: an connection retries configuration
                      None or -1 means to retry forever
                      0 means no retry
                      N means N retries
        :type retry: int
        """
        return _SubNotifier._prepare(self, publisher_id, retry=retry)

    def _notify(self, ctxt, event_type, payload, priority, publisher_id=None,
                retry=None):
        payload = self._serializer.serialize_entity(ctxt, payload)
        ctxt = self._serializer.serialize_context(ctxt)

        msg = dict(message_id=six.text_type(uuid.uuid4()),
                   publisher_id=publisher_id or self.publisher_id,
                   event_type=event_type,
                   priority=priority,
                   payload=payload,
                   timestamp=six.text_type(timeutils.utcnow()))

        def do_notify(ext):
            try:
                ext.obj.notify(ctxt, msg, priority, retry or self.retry)
            except Exception as e:
                _LOG.exception("Problem '%(e)s' attempting to send to "
                               "notification system. Payload=%(payload)s",
                               dict(e=e, payload=payload))

        if self._driver_mgr.extensions:
            self._driver_mgr.map(do_notify)

    def audit(self, ctxt, event_type, payload):
        """Send a notification at audit level.

        :param ctxt: a request context dict
        :type ctxt: dict
        :param event_type: describes the event, for example
                           'compute.create_instance'
        :type event_type: str
        :param payload: the notification payload
        :type payload: dict
        :raises: MessageDeliveryFailure
        """
        self._notify(ctxt, event_type, payload, 'AUDIT')

    def debug(self, ctxt, event_type, payload):
        """Send a notification at debug level.

        :param ctxt: a request context dict
        :type ctxt: dict
        :param event_type: describes the event, for example
                           'compute.create_instance'
        :type event_type: str
        :param payload: the notification payload
        :type payload: dict
        :raises: MessageDeliveryFailure
        """
        self._notify(ctxt, event_type, payload, 'DEBUG')

    def info(self, ctxt, event_type, payload):
        """Send a notification at info level.

        :param ctxt: a request context dict
        :type ctxt: dict
        :param event_type: describes the event, for example
                           'compute.create_instance'
        :type event_type: str
        :param payload: the notification payload
        :type payload: dict
        :raises: MessageDeliveryFailure
        """
        self._notify(ctxt, event_type, payload, 'INFO')

    def warn(self, ctxt, event_type, payload):
        """Send a notification at warning level.

        :param ctxt: a request context dict
        :type ctxt: dict
        :param event_type: describes the event, for example
                           'compute.create_instance'
        :type event_type: str
        :param payload: the notification payload
        :type payload: dict
        :raises: MessageDeliveryFailure
        """
        self._notify(ctxt, event_type, payload, 'WARN')

    warning = warn

    def error(self, ctxt, event_type, payload):
        """Send a notification at error level.

        :param ctxt: a request context dict
        :type ctxt: dict
        :param event_type: describes the event, for example
                           'compute.create_instance'
        :type event_type: str
        :param payload: the notification payload
        :type payload: dict
        :raises: MessageDeliveryFailure
        """
        self._notify(ctxt, event_type, payload, 'ERROR')

    def critical(self, ctxt, event_type, payload):
        """Send a notification at critical level.

        :param ctxt: a request context dict
        :type ctxt: dict
        :param event_type: describes the event, for example
                           'compute.create_instance'
        :type event_type: str
        :param payload: the notification payload
        :type payload: dict
        :raises: MessageDeliveryFailure
        """
        self._notify(ctxt, event_type, payload, 'CRITICAL')

    def sample(self, ctxt, event_type, payload):
        """Send a notification at sample level.

        Sample notifications are for high-frequency events
        that typically contain small payloads. eg: "CPU = 70%"

        Not all drivers support the sample level
        (log, for example) so these could be dropped.

        :param ctxt: a request context dict
        :type ctxt: dict
        :param event_type: describes the event, for example
                           'compute.create_instance'
        :type event_type: str
        :param payload: the notification payload
        :type payload: dict
        :raises: MessageDeliveryFailure
        """
        self._notify(ctxt, event_type, payload, 'SAMPLE')


class _SubNotifier(Notifier):

    _marker = Notifier._marker

    def __init__(self, base, publisher_id, retry):
        self._base = base
        self.transport = base.transport
        self.publisher_id = publisher_id
        self.retry = retry

        self._serializer = self._base._serializer
        self._driver_mgr = self._base._driver_mgr

    def _notify(self, ctxt, event_type, payload, priority):
        super(_SubNotifier, self)._notify(ctxt, event_type, payload, priority)

    @classmethod
    def _prepare(cls, base, publisher_id=_marker, retry=_marker):
        if publisher_id is cls._marker:
            publisher_id = base.publisher_id
        if retry is cls._marker:
            retry = base.retry
        return cls(base, publisher_id, retry=retry)
from oslo_messaging.notify.notifier import * # noqa
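Editor's note (not part of the diff): an illustrative sketch of the same Notifier API consumed through the new package name; the publisher id, event type and payload values are hypothetical::

    from oslo.config import cfg
    import oslo_messaging

    transport = oslo_messaging.get_transport(cfg.CONF)
    notifier = oslo_messaging.Notifier(transport,
                                       publisher_id='compute.host1',
                                       driver='messaging',
                                       topic='notifications')
    # ctxt is a plain dict unless a serializer is supplied
    notifier.info({}, 'compute.create_instance', {'instance_id': 12})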
@@ -1,9 +1,3 @@

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -16,382 +10,4 @@
# License for the specific language governing permissions and limitations
# under the License.

__all__ = [
    'ClientSendError',
    'RPCClient',
    'RPCVersionCapError',
    'RemoteError',
]

import six

from oslo.config import cfg
from oslo.messaging._drivers import base as driver_base
from oslo.messaging import _utils as utils
from oslo.messaging import exceptions
from oslo.messaging import serializer as msg_serializer

_client_opts = [
    cfg.IntOpt('rpc_response_timeout',
               default=60,
               help='Seconds to wait for a response from a call.'),
]


class RemoteError(exceptions.MessagingException):

    """Signifies that a remote endpoint method has raised an exception.

    Contains a string representation of the type of the original exception,
    the value of the original exception, and the traceback. These are
    sent to the parent as a joined string so printing the exception
    contains all of the relevant info.
    """

    def __init__(self, exc_type=None, value=None, traceback=None):
        self.exc_type = exc_type
        self.value = value
        self.traceback = traceback
        msg = ("Remote error: %(exc_type)s %(value)s\n%(traceback)s." %
               dict(exc_type=self.exc_type, value=self.value,
                    traceback=self.traceback))
        super(RemoteError, self).__init__(msg)


class RPCVersionCapError(exceptions.MessagingException):

    def __init__(self, version, version_cap):
        self.version = version
        self.version_cap = version_cap
        msg = ("Requested message version, %(version)s is too high. It needs "
               "to be lower than the specified version cap %(version_cap)s." %
               dict(version=self.version, version_cap=self.version_cap))
        super(RPCVersionCapError, self).__init__(msg)


class ClientSendError(exceptions.MessagingException):
    """Raised if we failed to send a message to a target."""

    def __init__(self, target, ex):
        msg = 'Failed to send to target "%s": %s' % (target, ex)
        super(ClientSendError, self).__init__(msg)
        self.target = target
        self.ex = ex


class _CallContext(object):

    _marker = object()

    def __init__(self, transport, target, serializer,
                 timeout=None, version_cap=None, retry=None):
        self.conf = transport.conf

        self.transport = transport
        self.target = target
        self.serializer = serializer
        self.timeout = timeout
        self.retry = retry
        self.version_cap = version_cap

        super(_CallContext, self).__init__()

    def _make_message(self, ctxt, method, args):
        msg = dict(method=method)

        msg['args'] = dict()
        for argname, arg in six.iteritems(args):
            msg['args'][argname] = self.serializer.serialize_entity(ctxt, arg)

        if self.target.namespace is not None:
            msg['namespace'] = self.target.namespace
        if self.target.version is not None:
            msg['version'] = self.target.version

        return msg

    def _check_version_cap(self, version):
        if not utils.version_is_compatible(self.version_cap, version):
            raise RPCVersionCapError(version=version,
                                     version_cap=self.version_cap)

    def can_send_version(self, version=_marker):
        """Check to see if a version is compatible with the version cap."""
        version = self.target.version if version is self._marker else version
        return (not self.version_cap or
                utils.version_is_compatible(self.version_cap,
                                            self.target.version))

    def cast(self, ctxt, method, **kwargs):
        """Invoke a method and return immediately. See RPCClient.cast()."""
        msg = self._make_message(ctxt, method, kwargs)
        ctxt = self.serializer.serialize_context(ctxt)

        if self.version_cap:
            self._check_version_cap(msg.get('version'))
        try:
            self.transport._send(self.target, ctxt, msg, retry=self.retry)
        except driver_base.TransportDriverError as ex:
            raise ClientSendError(self.target, ex)

    def call(self, ctxt, method, **kwargs):
        """Invoke a method and wait for a reply. See RPCClient.call()."""
        if self.target.fanout:
            raise exceptions.InvalidTarget('A call cannot be used with fanout',
                                           self.target)

        msg = self._make_message(ctxt, method, kwargs)
        msg_ctxt = self.serializer.serialize_context(ctxt)

        timeout = self.timeout
        if self.timeout is None:
            timeout = self.conf.rpc_response_timeout

        if self.version_cap:
            self._check_version_cap(msg.get('version'))

        try:
            result = self.transport._send(self.target, msg_ctxt, msg,
                                          wait_for_reply=True, timeout=timeout,
                                          retry=self.retry)
        except driver_base.TransportDriverError as ex:
            raise ClientSendError(self.target, ex)
        return self.serializer.deserialize_entity(ctxt, result)

    @classmethod
    def _prepare(cls, base,
                 exchange=_marker, topic=_marker, namespace=_marker,
                 version=_marker, server=_marker, fanout=_marker,
                 timeout=_marker, version_cap=_marker, retry=_marker):
        """Prepare a method invocation context. See RPCClient.prepare()."""
        kwargs = dict(
            exchange=exchange,
            topic=topic,
            namespace=namespace,
            version=version,
            server=server,
            fanout=fanout)
        kwargs = dict([(k, v) for k, v in kwargs.items()
                       if v is not cls._marker])
        target = base.target(**kwargs)

        if timeout is cls._marker:
            timeout = base.timeout
        if retry is cls._marker:
            retry = base.retry
        if version_cap is cls._marker:
            version_cap = base.version_cap

        return _CallContext(base.transport, target,
                            base.serializer,
                            timeout, version_cap, retry)

    def prepare(self, exchange=_marker, topic=_marker, namespace=_marker,
                version=_marker, server=_marker, fanout=_marker,
                timeout=_marker, version_cap=_marker, retry=_marker):
        """Prepare a method invocation context. See RPCClient.prepare()."""
        return self._prepare(self,
                             exchange, topic, namespace,
                             version, server, fanout,
                             timeout, version_cap, retry)


class RPCClient(object):

    """A class for invoking methods on remote servers.

    The RPCClient class is responsible for sending method invocations to remote
    servers via a messaging transport.

    A default target is supplied to the RPCClient constructor, but target
    attributes can be overridden for individual method invocations using the
    prepare() method.

    A method invocation consists of a request context dictionary, a method name
    and a dictionary of arguments. A cast() invocation just sends the request
    and returns immediately. A call() invocation waits for the server to send
    a return value.

    This class is intended to be used by wrapping it in another class which
    provides methods on the subclass to perform the remote invocation using
    call() or cast()::

        class TestClient(object):

            def __init__(self, transport):
                target = messaging.Target(topic='testtopic', version='2.0')
                self._client = messaging.RPCClient(transport, target)

            def test(self, ctxt, arg):
                return self._client.call(ctxt, 'test', arg=arg)

    An example of using the prepare() method to override some attributes of the
    default target::

        def test(self, ctxt, arg):
            cctxt = self._client.prepare(version='2.5')
            return cctxt.call(ctxt, 'test', arg=arg)

    RPCClient have a number of other properties - for example, timeout and
    version_cap - which may make sense to override for some method invocations,
    so they too can be passed to prepare()::

        def test(self, ctxt, arg):
            cctxt = self._client.prepare(timeout=10)
            return cctxt.call(ctxt, 'test', arg=arg)

    However, this class can be used directly without wrapping it another class.
    For example::

        transport = messaging.get_transport(cfg.CONF)
        target = messaging.Target(topic='testtopic', version='2.0')
        client = messaging.RPCClient(transport, target)
        client.call(ctxt, 'test', arg=arg)

    but this is probably only useful in limited circumstances as a wrapper
    class will usually help to make the code much more obvious.

    By default, cast() and call() will block until the message is successfully
    sent. However, the retry parameter can be used to have message sending
    fail with a MessageDeliveryFailure after the given number of retries. For
    example::

        client = messaging.RPCClient(transport, target, retry=None)
        client.call(ctxt, 'sync')
        try:
            client.prepare(retry=0).cast(ctxt, 'ping')
        except messaging.MessageDeliveryFailure:
            LOG.error("Failed to send ping message")
    """

    def __init__(self, transport, target,
                 timeout=None, version_cap=None, serializer=None, retry=None):
        """Construct an RPC client.

        :param transport: a messaging transport handle
        :type transport: Transport
        :param target: the default target for invocations
        :type target: Target
        :param timeout: an optional default timeout (in seconds) for call()s
        :type timeout: int or float
        :param version_cap: raise a RPCVersionCapError version exceeds this cap
        :type version_cap: str
        :param serializer: an optional entity serializer
        :type serializer: Serializer
        :param retry: an optional default connection retries configuration
                      None or -1 means to retry forever
                      0 means no retry
                      N means N retries
        :type retry: int
        """
        self.conf = transport.conf
        self.conf.register_opts(_client_opts)

        self.transport = transport
        self.target = target
        self.timeout = timeout
        self.retry = retry
        self.version_cap = version_cap
        self.serializer = serializer or msg_serializer.NoOpSerializer()

        super(RPCClient, self).__init__()

    _marker = _CallContext._marker

    def prepare(self, exchange=_marker, topic=_marker, namespace=_marker,
                version=_marker, server=_marker, fanout=_marker,
                timeout=_marker, version_cap=_marker, retry=_marker):
        """Prepare a method invocation context.

        Use this method to override client properties for an individual method
        invocation. For example::

            def test(self, ctxt, arg):
                cctxt = self.prepare(version='2.5')
                return cctxt.call(ctxt, 'test', arg=arg)

        :param exchange: see Target.exchange
        :type exchange: str
        :param topic: see Target.topic
        :type topic: str
        :param namespace: see Target.namespace
        :type namespace: str
        :param version: requirement the server must support, see Target.version
        :type version: str
        :param server: send to a specific server, see Target.server
        :type server: str
        :param fanout: send to all servers on topic, see Target.fanout
        :type fanout: bool
        :param timeout: an optional default timeout (in seconds) for call()s
        :type timeout: int or float
        :param version_cap: raise a RPCVersionCapError version exceeds this cap
        :type version_cap: str
        :param retry: an optional connection retries configuration
                      None or -1 means to retry forever
                      0 means no retry
                      N means N retries
        :type retry: int
        """
        return _CallContext._prepare(self,
                                     exchange, topic, namespace,
                                     version, server, fanout,
                                     timeout, version_cap, retry)

    def cast(self, ctxt, method, **kwargs):
        """Invoke a method and return immediately.

        Method arguments must either be primitive types or types supported by
        the client's serializer (if any).

        Similarly, the request context must be a dict unless the client's
        serializer supports serializing another type.

        :param ctxt: a request context dict
        :type ctxt: dict
        :param method: the method name
        :type method: str
        :param kwargs: a dict of method arguments
        :type kwargs: dict
        :raises: MessageDeliveryFailure
        """
        self.prepare().cast(ctxt, method, **kwargs)

    def call(self, ctxt, method, **kwargs):
        """Invoke a method and wait for a reply.

        Method arguments must either be primitive types or types supported by
        the client's serializer (if any). Similarly, the request context must
        be a dict unless the client's serializer supports serializing another
        type.

        The semantics of how any errors raised by the remote RPC endpoint
        method are handled are quite subtle.

        Firstly, if the remote exception is contained in one of the modules
        listed in the allow_remote_exmods messaging.get_transport() parameter,
        then it this exception will be re-raised by call(). However, such
        locally re-raised remote exceptions are distinguishable from the same
        exception type raised locally because re-raised remote exceptions are
        modified such that their class name ends with the '_Remote' suffix so
        you may do::

            if ex.__class__.__name__.endswith('_Remote'):
                # Some special case for locally re-raised remote exceptions

        Secondly, if a remote exception is not from a module listed in the
        allowed_remote_exmods list, then a messaging.RemoteError exception is
        raised with all details of the remote exception.

        :param ctxt: a request context dict
        :type ctxt: dict
        :param method: the method name
        :type method: str
        :param kwargs: a dict of method arguments
        :type kwargs: dict
        :raises: MessagingTimeout, RemoteError, MessageDeliveryFailure
        """
        return self.prepare().call(ctxt, method, **kwargs)

    def can_send_version(self, version=_marker):
        """Check to see if a version is compatible with the version cap."""
        return self.prepare(version=version).can_send_version()
from oslo_messaging.rpc.client import * # noqa
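Editor's note (not part of the diff): the wrapper pattern from the RPCClient docstring, written against the new package name; TestClient and the 'test' method are hypothetical, exactly as in the original example::

    import oslo_messaging

    class TestClient(object):

        def __init__(self, transport):
            target = oslo_messaging.Target(topic='testtopic', version='2.0')
            self._client = oslo_messaging.RPCClient(transport, target)

        def test(self, ctxt, arg):
            # Override the version for this one invocation, then wait for
            # the server's reply.
            cctxt = self._client.prepare(version='2.5')
            return cctxt.call(ctxt, 'test', arg=arg)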
@@ -1,9 +1,3 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -16,180 +10,4 @@
# License for the specific language governing permissions and limitations
# under the License.

__all__ = [
    'NoSuchMethod',
    'RPCDispatcher',
    'RPCDispatcherError',
    'UnsupportedVersion',
    'ExpectedException',
]

import contextlib
import logging
import sys

import six

from oslo.messaging._i18n import _
from oslo.messaging import _utils as utils
from oslo.messaging import localcontext
from oslo.messaging import serializer as msg_serializer
from oslo.messaging import server as msg_server
from oslo.messaging import target as msg_target

LOG = logging.getLogger(__name__)


class ExpectedException(Exception):
    """Encapsulates an expected exception raised by an RPC endpoint

    Merely instantiating this exception records the current exception
    information, which will be passed back to the RPC client without
    exceptional logging.
    """
    def __init__(self):
        self.exc_info = sys.exc_info()


class RPCDispatcherError(msg_server.MessagingServerError):
    "A base class for all RPC dispatcher exceptions."


class NoSuchMethod(RPCDispatcherError, AttributeError):
    "Raised if there is no endpoint which exposes the requested method."

    def __init__(self, method):
        msg = "Endpoint does not support RPC method %s" % method
        super(NoSuchMethod, self).__init__(msg)
        self.method = method


class UnsupportedVersion(RPCDispatcherError):
    "Raised if there is no endpoint which supports the requested version."

    def __init__(self, version, method=None):
        msg = "Endpoint does not support RPC version %s" % version
        if method:
            msg = "%s. Attempted method: %s" % (msg, method)
        super(UnsupportedVersion, self).__init__(msg)
        self.version = version
        self.method = method


class RPCDispatcher(object):
    """A message dispatcher which understands RPC messages.

    A MessageHandlingServer is constructed by passing a callable dispatcher
    which is invoked with context and message dictionaries each time a message
    is received.

    RPCDispatcher is one such dispatcher which understands the format of RPC
    messages. The dispatcher looks at the namespace, version and method values
    in the message and matches those against a list of available endpoints.

    Endpoints may have a target attribute describing the namespace and version
    of the methods exposed by that object. All public methods on an endpoint
    object are remotely invokable by clients.


    """

    def __init__(self, target, endpoints, serializer):
        """Construct a rpc server dispatcher.

        :param target: the exchange, topic and server to listen on
        :type target: Target
        """

        self.endpoints = endpoints
        self.serializer = serializer or msg_serializer.NoOpSerializer()
        self._default_target = msg_target.Target()
        self._target = target

    def _listen(self, transport):
        return transport._listen(self._target)

    @staticmethod
    def _is_namespace(target, namespace):
        return namespace == target.namespace

    @staticmethod
    def _is_compatible(target, version):
        endpoint_version = target.version or '1.0'
        return utils.version_is_compatible(endpoint_version, version)

    def _do_dispatch(self, endpoint, method, ctxt, args, executor_callback):
        ctxt = self.serializer.deserialize_context(ctxt)
        new_args = dict()
        for argname, arg in six.iteritems(args):
            new_args[argname] = self.serializer.deserialize_entity(ctxt, arg)
        func = getattr(endpoint, method)
        if executor_callback:
            result = executor_callback(func, ctxt, **new_args)
        else:
            result = func(ctxt, **new_args)
        return self.serializer.serialize_entity(ctxt, result)

    @contextlib.contextmanager
    def __call__(self, incoming, executor_callback=None):
        incoming.acknowledge()
        yield lambda: self._dispatch_and_reply(incoming, executor_callback)

    def _dispatch_and_reply(self, incoming, executor_callback):
        try:
            incoming.reply(self._dispatch(incoming.ctxt,
                                          incoming.message,
                                          executor_callback))
|
||||
except ExpectedException as e:
|
||||
LOG.debug(u'Expected exception during message handling (%s)',
|
||||
e.exc_info[1])
|
||||
incoming.reply(failure=e.exc_info, log_failure=False)
|
||||
except Exception as e:
|
||||
# sys.exc_info() is deleted by LOG.exception().
|
||||
exc_info = sys.exc_info()
|
||||
LOG.error(_('Exception during message handling: %s'), e,
|
||||
exc_info=exc_info)
|
||||
incoming.reply(failure=exc_info)
|
||||
# NOTE(dhellmann): Remove circular object reference
|
||||
# between the current stack frame and the traceback in
|
||||
# exc_info.
|
||||
del exc_info
|
||||
|
||||
def _dispatch(self, ctxt, message, executor_callback=None):
|
||||
"""Dispatch an RPC message to the appropriate endpoint method.
|
||||
|
||||
:param ctxt: the request context
|
||||
:type ctxt: dict
|
||||
:param message: the message payload
|
||||
:type message: dict
|
||||
:raises: NoSuchMethod, UnsupportedVersion
|
||||
"""
|
||||
method = message.get('method')
|
||||
args = message.get('args', {})
|
||||
namespace = message.get('namespace')
|
||||
version = message.get('version', '1.0')
|
||||
|
||||
found_compatible = False
|
||||
for endpoint in self.endpoints:
|
||||
target = getattr(endpoint, 'target', None)
|
||||
if not target:
|
||||
target = self._default_target
|
||||
|
||||
if not (self._is_namespace(target, namespace) and
|
||||
self._is_compatible(target, version)):
|
||||
continue
|
||||
|
||||
if hasattr(endpoint, method):
|
||||
localcontext.set_local_context(ctxt)
|
||||
try:
|
||||
return self._do_dispatch(endpoint, method, ctxt, args,
|
||||
executor_callback)
|
||||
finally:
|
||||
localcontext.clear_local_context()
|
||||
|
||||
found_compatible = True
|
||||
|
||||
if found_compatible:
|
||||
raise NoSuchMethod(method)
|
||||
else:
|
||||
raise UnsupportedVersion(version, method=method)
|
||||
from oslo_messaging.rpc.dispatcher import * # noqa
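For illustration, a sketch of an endpoint carrying a namespaced, versioned
target, together with the shape of an incoming message the dispatcher above
would route to it (all field values are assumptions for the example)::

    import oslo_messaging

    class BackupEndpoint(object):
        target = oslo_messaging.Target(namespace='backup', version='1.2')

        def create(self, ctxt, volume_id):
            return {'backup_of': volume_id}

    # The dispatcher compares namespace, version and method against each
    # endpoint's target; this message is compatible with the endpoint
    # above (same namespace, 1.1 is compatible with 1.2, method exists).
    message = {
        'method': 'create',
        'namespace': 'backup',
        'version': '1.1',
        'args': {'volume_id': 'vol-0001'},
    }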
|
||||
|
@ -1,6 +1,3 @@
|
||||
|
||||
# Copyright 2013 Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
@ -13,140 +10,4 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
An RPC server exposes a number of endpoints, each of which contain a set of
|
||||
methods which may be invoked remotely by clients over a given transport.
|
||||
|
||||
To create an RPC server, you supply a transport, target and a list of
|
||||
endpoints.
|
||||
|
||||
A transport can be obtained simply by calling the get_transport() method::
|
||||
|
||||
transport = messaging.get_transport(conf)
|
||||
|
||||
which will load the appropriate transport driver according to the user's
|
||||
messaging configuration. See get_transport() for more details.
|
||||
|
||||
The target supplied when creating an RPC server expresses the topic, server
|
||||
name and - optionally - the exchange to listen on. See Target for more details
|
||||
on these attributes.
|
||||
|
||||
Each endpoint object may have a target attribute which may have namespace and
|
||||
version fields set. By default, we use the 'null namespace' and version 1.0.
|
||||
Incoming method calls will be dispatched to the first endpoint with the
|
||||
requested method, a matching namespace and a compatible version number.
|
||||
|
||||
RPC servers have start(), stop() and wait() methods to begin handling
|
||||
requests, stop handling requests and wait for all in-process requests to
|
||||
complete.
|
||||
|
||||
A simple example of an RPC server with multiple endpoints might be::
|
||||
|
||||
from oslo.config import cfg
|
||||
from oslo import messaging
|
||||
|
||||
class ServerControlEndpoint(object):
|
||||
|
||||
target = messaging.Target(namespace='control',
|
||||
version='2.0')
|
||||
|
||||
def __init__(self, server):
|
||||
self.server = server
|
||||
|
||||
def stop(self, ctx):
|
||||
if self.server:
|
||||
self.server.stop()
|
||||
|
||||
class TestEndpoint(object):
|
||||
|
||||
def test(self, ctx, arg):
|
||||
return arg
|
||||
|
||||
transport = messaging.get_transport(cfg.CONF)
|
||||
target = messaging.Target(topic='test', server='server1')
|
||||
endpoints = [
|
||||
ServerControlEndpoint(None),
|
||||
TestEndpoint(),
|
||||
]
|
||||
server = messaging.get_rpc_server(transport, target, endpoints,
|
||||
executor='blocking')
|
||||
server.start()
|
||||
server.wait()
|
||||
|
||||
Clients can invoke methods on the server in three ways: by sending the request
|
||||
to a topic, in which case it is handled by one of the servers listening on that
|
||||
topic; by sending the request to a specific server listening on the topic; or
|
||||
by sending the request to all servers listening on the topic (known as fanout).
|
||||
The mode is chosen via the server and fanout attributes on Target, but is
|
||||
transparent to the server.
|
||||
|
||||
The first parameter to method invocations is always the request context
|
||||
supplied by the client.
|
||||
|
||||
Parameters to the method invocation must be primitive types, and so must the
|
||||
return values from the methods. By supplying a serializer object, a server can
|
||||
deserialize a request context and arguments from - and serialize return values
|
||||
to - primitive types.
|
||||
"""
|
||||
|
||||
__all__ = [
|
||||
'get_rpc_server',
|
||||
'expected_exceptions',
|
||||
]
|
||||
|
||||
from oslo.messaging.rpc import dispatcher as rpc_dispatcher
|
||||
from oslo.messaging import server as msg_server
|
||||
|
||||
|
||||
def get_rpc_server(transport, target, endpoints,
|
||||
executor='blocking', serializer=None):
|
||||
"""Construct an RPC server.
|
||||
|
||||
The executor parameter controls how incoming messages will be received and
|
||||
dispatched. By default, the simplest executor is used - the blocking
|
||||
executor.
|
||||
|
||||
If the eventlet executor is used, the threading and time libraries need to be
|
||||
monkeypatched.
|
||||
|
||||
:param transport: the messaging transport
|
||||
:type transport: Transport
|
||||
:param target: the exchange, topic and server to listen on
|
||||
:type target: Target
|
||||
:param endpoints: a list of endpoint objects
|
||||
:type endpoints: list
|
||||
:param executor: name of a message executor - for example
|
||||
'eventlet', 'blocking'
|
||||
:type executor: str
|
||||
:param serializer: an optional entity serializer
|
||||
:type serializer: Serializer
|
||||
"""
|
||||
dispatcher = rpc_dispatcher.RPCDispatcher(target, endpoints, serializer)
|
||||
return msg_server.MessageHandlingServer(transport, dispatcher, executor)
|
||||
|
||||
|
||||
def expected_exceptions(*exceptions):
|
||||
"""Decorator for RPC endpoint methods that raise expected exceptions.
|
||||
|
||||
Marking an endpoint method with this decorator allows the declaration
|
||||
of expected exceptions that the RPC server should not consider fatal,
|
||||
and not log as if they were generated in a real error scenario.
|
||||
|
||||
Note that this will cause listed exceptions to be wrapped in an
|
||||
ExpectedException, which is used internally by the RPC server. The RPC
|
||||
client will see the original exception type.
|
||||
"""
|
||||
def outer(func):
|
||||
def inner(*args, **kwargs):
|
||||
try:
|
||||
return func(*args, **kwargs)
|
||||
# Take advantage of the fact that we can catch
|
||||
# multiple exception types using a tuple of
|
||||
# exception classes, with subclass detection
|
||||
# for free. Any exception that is not in or
|
||||
# derived from the args passed to us will be
|
||||
# ignored and thrown as normal.
|
||||
except exceptions:
|
||||
raise rpc_dispatcher.ExpectedException()
|
||||
return inner
|
||||
return outer
|
||||
from oslo_messaging.rpc.server import * # noqa
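A hypothetical endpoint using the expected_exceptions decorator described
above might look like this (the NotFound exception and the endpoint itself
are assumptions for the example)::

    import oslo_messaging


    class NotFound(Exception):
        pass


    class VolumeEndpoint(object):

        @oslo_messaging.expected_exceptions(NotFound)
        def get_volume(self, ctxt, volume_id):
            # A NotFound raised here is wrapped in ExpectedException and
            # not logged as an error by the server; the client still sees
            # the original NotFound.
            raise NotFound('no volume %s' % volume_id)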
|
||||
|
@ -1,5 +1,3 @@
|
||||
# Copyright 2013 IBM Corp.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
@ -12,65 +10,4 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
__all__ = ['Serializer', 'NoOpSerializer']
|
||||
|
||||
"""Provides the definition of a message serialization handler"""
|
||||
|
||||
import abc
|
||||
|
||||
import six
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
|
||||
class Serializer(object):
|
||||
"""Generic (de-)serialization definition base class."""
|
||||
|
||||
@abc.abstractmethod
|
||||
def serialize_entity(self, ctxt, entity):
|
||||
"""Serialize something to primitive form.
|
||||
|
||||
:param ctxt: Request context, in deserialized form
|
||||
:param entity: Entity to be serialized
|
||||
:returns: Serialized form of entity
|
||||
"""
|
||||
|
||||
@abc.abstractmethod
|
||||
def deserialize_entity(self, ctxt, entity):
|
||||
"""Deserialize something from primitive form.
|
||||
|
||||
:param ctxt: Request context, in deserialized form
|
||||
:param entity: Primitive to be deserialized
|
||||
:returns: Deserialized form of entity
|
||||
"""
|
||||
|
||||
@abc.abstractmethod
|
||||
def serialize_context(self, ctxt):
|
||||
"""Serialize a request context into a dictionary.
|
||||
|
||||
:param ctxt: Request context
|
||||
:returns: Serialized form of context
|
||||
"""
|
||||
|
||||
@abc.abstractmethod
|
||||
def deserialize_context(self, ctxt):
|
||||
"""Deserialize a dictionary into a request context.
|
||||
|
||||
:param ctxt: Request context dictionary
|
||||
:returns: Deserialized form of entity
|
||||
"""
|
||||
|
||||
|
||||
class NoOpSerializer(Serializer):
|
||||
"""A serializer that does nothing."""
|
||||
|
||||
def serialize_entity(self, ctxt, entity):
|
||||
return entity
|
||||
|
||||
def deserialize_entity(self, ctxt, entity):
|
||||
return entity
|
||||
|
||||
def serialize_context(self, ctxt):
|
||||
return ctxt
|
||||
|
||||
def deserialize_context(self, ctxt):
|
||||
return ctxt
|
||||
from oslo_messaging.serializer import * # noqa
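A minimal custom serializer, sketched under the assumption of an
application-defined RequestContext class with to_dict()/from_dict()
helpers::

    import oslo_messaging


    class ContextSerializer(oslo_messaging.Serializer):

        def serialize_entity(self, ctxt, entity):
            return entity

        def deserialize_entity(self, ctxt, entity):
            return entity

        def serialize_context(self, ctxt):
            # Reduce the context object to a plain dict for the wire.
            return ctxt.to_dict()

        def deserialize_context(self, ctxt):
            # Rebuild the context object from the wire-format dict.
            return RequestContext.from_dict(ctxt)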
|
||||
|
@ -1,9 +1,3 @@
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
# Copyright 2013 Red Hat, Inc.
|
||||
# Copyright 2013 New Dream Network, LLC (DreamHost)
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
@ -16,135 +10,4 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
__all__ = [
|
||||
'ExecutorLoadFailure',
|
||||
'MessageHandlingServer',
|
||||
'MessagingServerError',
|
||||
'ServerListenError',
|
||||
]
|
||||
|
||||
from stevedore import driver
|
||||
|
||||
from oslo.messaging._drivers import base as driver_base
|
||||
from oslo.messaging import exceptions
|
||||
|
||||
|
||||
class MessagingServerError(exceptions.MessagingException):
|
||||
"""Base class for all MessageHandlingServer exceptions."""
|
||||
|
||||
|
||||
class ExecutorLoadFailure(MessagingServerError):
|
||||
"""Raised if an executor can't be loaded."""
|
||||
|
||||
def __init__(self, executor, ex):
|
||||
msg = 'Failed to load executor "%s": %s' % (executor, ex)
|
||||
super(ExecutorLoadFailure, self).__init__(msg)
|
||||
self.executor = executor
|
||||
self.ex = ex
|
||||
|
||||
|
||||
class ServerListenError(MessagingServerError):
|
||||
"""Raised if we failed to listen on a target."""
|
||||
|
||||
def __init__(self, target, ex):
|
||||
msg = 'Failed to listen on target "%s": %s' % (target, ex)
|
||||
super(ServerListenError, self).__init__(msg)
|
||||
self.target = target
|
||||
self.ex = ex
|
||||
|
||||
|
||||
class MessageHandlingServer(object):
|
||||
"""Server for handling messages.
|
||||
|
||||
Connect a transport to a dispatcher that knows how to process the
|
||||
message using an executor that knows how the app wants to create
|
||||
new tasks.
|
||||
"""
|
||||
|
||||
def __init__(self, transport, dispatcher, executor='blocking'):
|
||||
"""Construct a message handling server.
|
||||
|
||||
The dispatcher parameter is a callable which is invoked with context
|
||||
and message dictionaries each time a message is received.
|
||||
|
||||
The executor parameter controls how incoming messages will be received
|
||||
and dispatched. By default, the simplest executor is used - the
|
||||
blocking executor.
|
||||
|
||||
:param transport: the messaging transport
|
||||
:type transport: Transport
|
||||
:param dispatcher: a callable which is invoked for each method
|
||||
:type dispatcher: callable
|
||||
:param executor: name of message executor - for example
|
||||
'eventlet', 'blocking'
|
||||
:type executor: str
|
||||
"""
|
||||
self.conf = transport.conf
|
||||
|
||||
self.transport = transport
|
||||
self.dispatcher = dispatcher
|
||||
self.executor = executor
|
||||
|
||||
try:
|
||||
mgr = driver.DriverManager('oslo.messaging.executors',
|
||||
self.executor)
|
||||
except RuntimeError as ex:
|
||||
raise ExecutorLoadFailure(self.executor, ex)
|
||||
else:
|
||||
self._executor_cls = mgr.driver
|
||||
self._executor = None
|
||||
|
||||
super(MessageHandlingServer, self).__init__()
|
||||
|
||||
def start(self):
|
||||
"""Start handling incoming messages.
|
||||
|
||||
This method causes the server to begin polling the transport for
|
||||
incoming messages and passing them to the dispatcher. Message
|
||||
processing will continue until the stop() method is called.
|
||||
|
||||
The executor controls how the server integrates with the applications
|
||||
I/O handling strategy - it may choose to poll for messages in a new
|
||||
process, thread or co-operatively scheduled coroutine or simply by
|
||||
registering a callback with an event loop. Similarly, the executor may
|
||||
choose to dispatch messages in a new thread, coroutine or simply the
|
||||
current thread.
|
||||
"""
|
||||
if self._executor is not None:
|
||||
return
|
||||
try:
|
||||
listener = self.dispatcher._listen(self.transport)
|
||||
except driver_base.TransportDriverError as ex:
|
||||
raise ServerListenError(self.target, ex)
|
||||
|
||||
self._executor = self._executor_cls(self.conf, listener,
|
||||
self.dispatcher)
|
||||
self._executor.start()
|
||||
|
||||
def stop(self):
|
||||
"""Stop handling incoming messages.
|
||||
|
||||
Once this method returns, no new incoming messages will be handled by
|
||||
the server. However, the server may still be in the process of handling
|
||||
some messages, and underlying driver resources associated with this
|
||||
server are still in use. See 'wait' for more details.
|
||||
"""
|
||||
if self._executor is not None:
|
||||
self._executor.stop()
|
||||
|
||||
def wait(self):
|
||||
"""Wait for message processing to complete.
|
||||
|
||||
After calling stop(), there may still be some existing messages
|
||||
which have not been completely processed. The wait() method blocks
|
||||
until all message processing has completed.
|
||||
|
||||
Once it's finished, the underlying driver resources associated with this
|
||||
server are released (for example, closing idle network connections).
|
||||
"""
|
||||
if self._executor is not None:
|
||||
self._executor.wait()
|
||||
# Close listener connection after processing all messages
|
||||
self._executor.listener.cleanup()
|
||||
|
||||
self._executor = None
|
||||
from oslo_messaging.server import * # noqa
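A sketch of the intended start/stop/wait lifecycle, assuming a server
constructed as in the RPC server example earlier::

    import time

    server.start()
    try:
        # Serve until interrupted.
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        server.stop()
        server.wait()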
|
||||
|
@ -1,6 +1,3 @@
|
||||
|
||||
# Copyright 2013 Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
@ -13,82 +10,4 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
class Target(object):
|
||||
|
||||
"""Identifies the destination of messages.
|
||||
|
||||
A Target encapsulates all the information to identify where a message
|
||||
should be sent or what messages a server is listening for.
|
||||
|
||||
Different subsets of the information encapsulated in a Target object are
|
||||
relevant to various aspects of the API:
|
||||
|
||||
creating a server:
|
||||
topic and server are required; exchange is optional
|
||||
an endpoint's target:
|
||||
namespace and version are optional
|
||||
client sending a message:
|
||||
topic is required, all other attributes optional
|
||||
|
||||
Its attributes are:
|
||||
|
||||
:param exchange: A scope for topics. Leave unspecified to default to the
|
||||
control_exchange configuration option.
|
||||
:type exchange: str
|
||||
:param topic: A name which identifies the set of interfaces exposed by a
|
||||
server. Multiple servers may listen on a topic and messages will be
|
||||
dispatched to one of the servers in a round-robin fashion.
|
||||
:type topic: str
|
||||
:param namespace: Identifies a particular interface (i.e. set of methods)
|
||||
exposed by a server. The default interface has no namespace identifier
|
||||
and is referred to as the null namespace.
|
||||
:type namespace: str
|
||||
:param version: Interfaces have a major.minor version number associated
|
||||
with them. A minor number increment indicates a backwards compatible
|
||||
change and an incompatible change is indicated by a major number bump.
|
||||
Servers may implement multiple major versions and clients may indicate
|
||||
that their message requires a particular minimum minor version.
|
||||
:type version: str
|
||||
:param server: Clients can request that a message be directed to a specific
|
||||
server, rather than just one of a pool of servers listening on the topic.
|
||||
:type server: str
|
||||
:param fanout: Clients may request that a message be directed to all
|
||||
servers listening on a topic by setting fanout to ``True``, rather than
|
||||
just one of them.
|
||||
:type fanout: bool
|
||||
"""
|
||||
|
||||
def __init__(self, exchange=None, topic=None, namespace=None,
|
||||
version=None, server=None, fanout=None):
|
||||
self.exchange = exchange
|
||||
self.topic = topic
|
||||
self.namespace = namespace
|
||||
self.version = version
|
||||
self.server = server
|
||||
self.fanout = fanout
|
||||
|
||||
def __call__(self, **kwargs):
|
||||
for a in ('exchange', 'topic', 'namespace',
|
||||
'version', 'server', 'fanout'):
|
||||
kwargs.setdefault(a, getattr(self, a))
|
||||
return Target(**kwargs)
|
||||
|
||||
def __eq__(self, other):
|
||||
return vars(self) == vars(other)
|
||||
|
||||
def __ne__(self, other):
|
||||
return not self == other
|
||||
|
||||
def __repr__(self):
|
||||
attrs = []
|
||||
for a in ['exchange', 'topic', 'namespace',
|
||||
'version', 'server', 'fanout']:
|
||||
v = getattr(self, a)
|
||||
if v:
|
||||
attrs.append((a, v))
|
||||
values = ', '.join(['%s=%s' % i for i in attrs])
|
||||
return '<Target ' + values + '>'
|
||||
|
||||
def __hash__(self):
|
||||
return id(self)
|
||||
from oslo_messaging.target import * # noqa
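For example, a client-side target and a narrowed copy of it produced through
__call__ (the topic, server and version values are illustrative)::

    import oslo_messaging

    target = oslo_messaging.Target(topic='compute', version='2.0')

    # Direct messages at one specific server and require a newer minor
    # version; unset attributes are inherited from the original target.
    narrowed = target(server='compute-1', version='2.6')

    assert narrowed.topic == 'compute'
    assert narrowed.server == 'compute-1'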
|
||||
|
@ -1,10 +1,3 @@
|
||||
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
# Copyright 2013 Red Hat, Inc.
|
||||
# Copyright (c) 2012 Rackspace Hosting
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
@ -17,408 +10,4 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
__all__ = [
|
||||
'DriverLoadFailure',
|
||||
'InvalidTransportURL',
|
||||
'Transport',
|
||||
'TransportHost',
|
||||
'TransportURL',
|
||||
'get_transport',
|
||||
'set_transport_defaults',
|
||||
]
|
||||
|
||||
import six
|
||||
from six.moves.urllib import parse
|
||||
from stevedore import driver
|
||||
|
||||
from oslo.config import cfg
|
||||
from oslo.messaging import exceptions
|
||||
|
||||
|
||||
_transport_opts = [
|
||||
cfg.StrOpt('transport_url',
|
||||
help='A URL representing the messaging driver to use and its '
|
||||
'full configuration. If not set, we fall back to the '
|
||||
'rpc_backend option and driver specific configuration.'),
|
||||
cfg.StrOpt('rpc_backend',
|
||||
default='rabbit',
|
||||
help='The messaging driver to use, defaults to rabbit. Other '
|
||||
'drivers include qpid and zmq.'),
|
||||
cfg.StrOpt('control_exchange',
|
||||
default='openstack',
|
||||
help='The default exchange under which topics are scoped. May '
|
||||
'be overridden by an exchange name specified in the '
|
||||
'transport_url option.'),
|
||||
]
|
||||
|
||||
|
||||
def set_transport_defaults(control_exchange):
|
||||
"""Set defaults for messaging transport configuration options.
|
||||
|
||||
:param control_exchange: the default exchange under which topics are scoped
|
||||
:type control_exchange: str
|
||||
"""
|
||||
cfg.set_defaults(_transport_opts,
|
||||
control_exchange=control_exchange)
|
||||
|
||||
|
||||
class Transport(object):
|
||||
|
||||
"""A messaging transport.
|
||||
|
||||
This is a mostly opaque handle for an underlying messaging transport
|
||||
driver.
|
||||
|
||||
It has a single 'conf' property which is the cfg.ConfigOpts instance used
|
||||
to construct the transport object.
|
||||
"""
|
||||
|
||||
def __init__(self, driver):
|
||||
self.conf = driver.conf
|
||||
self._driver = driver
|
||||
|
||||
def _require_driver_features(self, requeue=False):
|
||||
self._driver.require_features(requeue=requeue)
|
||||
|
||||
def _send(self, target, ctxt, message, wait_for_reply=None, timeout=None,
|
||||
retry=None):
|
||||
if not target.topic:
|
||||
raise exceptions.InvalidTarget('A topic is required to send',
|
||||
target)
|
||||
return self._driver.send(target, ctxt, message,
|
||||
wait_for_reply=wait_for_reply,
|
||||
timeout=timeout, retry=retry)
|
||||
|
||||
def _send_notification(self, target, ctxt, message, version, retry=None):
|
||||
if not target.topic:
|
||||
raise exceptions.InvalidTarget('A topic is required to send',
|
||||
target)
|
||||
self._driver.send_notification(target, ctxt, message, version,
|
||||
retry=retry)
|
||||
|
||||
def _listen(self, target):
|
||||
if not (target.topic and target.server):
|
||||
raise exceptions.InvalidTarget('A server\'s target must have '
|
||||
'topic and server names specified',
|
||||
target)
|
||||
return self._driver.listen(target)
|
||||
|
||||
def _listen_for_notifications(self, targets_and_priorities, pool):
|
||||
for target, priority in targets_and_priorities:
|
||||
if not target.topic:
|
||||
raise exceptions.InvalidTarget('A target must have '
|
||||
'topic specified',
|
||||
target)
|
||||
return self._driver.listen_for_notifications(
|
||||
targets_and_priorities, pool)
|
||||
|
||||
def cleanup(self):
|
||||
"""Release all resources associated with this transport."""
|
||||
self._driver.cleanup()
|
||||
|
||||
|
||||
class InvalidTransportURL(exceptions.MessagingException):
|
||||
"""Raised if transport URL is invalid."""
|
||||
|
||||
def __init__(self, url, msg):
|
||||
super(InvalidTransportURL, self).__init__(msg)
|
||||
self.url = url
|
||||
|
||||
|
||||
class DriverLoadFailure(exceptions.MessagingException):
|
||||
"""Raised if a transport driver can't be loaded."""
|
||||
|
||||
def __init__(self, driver, ex):
|
||||
msg = 'Failed to load transport driver "%s": %s' % (driver, ex)
|
||||
super(DriverLoadFailure, self).__init__(msg)
|
||||
self.driver = driver
|
||||
self.ex = ex
|
||||
|
||||
|
||||
def get_transport(conf, url=None, allowed_remote_exmods=None, aliases=None):
|
||||
"""A factory method for Transport objects.
|
||||
|
||||
This method will construct a Transport object from transport configuration
|
||||
gleaned from the user's configuration and, optionally, a transport URL.
|
||||
|
||||
If a transport URL is supplied as a parameter, any transport configuration
|
||||
contained in it takes precedence. If no transport URL is supplied but
|
||||
one is present in the user's configuration, then that configured URL
|
||||
takes the place of the URL parameter. In both cases, any
|
||||
configuration not supplied in the transport URL may be taken from
|
||||
individual configuration parameters in the user's configuration.
|
||||
|
||||
An example transport URL might be::
|
||||
|
||||
rabbit://me:passwd@host:5672/virtual_host
|
||||
|
||||
and can either be passed as a string or a TransportURL object.
|
||||
|
||||
:param conf: the user configuration
|
||||
:type conf: cfg.ConfigOpts
|
||||
:param url: a transport URL
|
||||
:type url: str or TransportURL
|
||||
:param allowed_remote_exmods: a list of modules which a client using this
|
||||
transport will deserialize remote exceptions
|
||||
from
|
||||
:type allowed_remote_exmods: list
|
||||
:param aliases: A map of transport alias to transport name
|
||||
:type aliases: dict
|
||||
"""
|
||||
allowed_remote_exmods = allowed_remote_exmods or []
|
||||
conf.register_opts(_transport_opts)
|
||||
|
||||
if not isinstance(url, TransportURL):
|
||||
url = url or conf.transport_url
|
||||
parsed = TransportURL.parse(conf, url, aliases)
|
||||
if not parsed.transport:
|
||||
raise InvalidTransportURL(url, 'No scheme specified in "%s"' % url)
|
||||
url = parsed
|
||||
|
||||
kwargs = dict(default_exchange=conf.control_exchange,
|
||||
allowed_remote_exmods=allowed_remote_exmods)
|
||||
|
||||
try:
|
||||
mgr = driver.DriverManager('oslo.messaging.drivers',
|
||||
url.transport.split('+')[0],
|
||||
invoke_on_load=True,
|
||||
invoke_args=[conf, url],
|
||||
invoke_kwds=kwargs)
|
||||
except RuntimeError as ex:
|
||||
raise DriverLoadFailure(url.transport, ex)
|
||||
|
||||
return Transport(mgr.driver)
|
||||
|
||||
|
||||
class TransportHost(object):
|
||||
|
||||
"""A host element of a parsed transport URL."""
|
||||
|
||||
def __init__(self, hostname=None, port=None, username=None, password=None):
|
||||
self.hostname = hostname
|
||||
self.port = port
|
||||
self.username = username
|
||||
self.password = password
|
||||
|
||||
def __hash__(self):
|
||||
return hash((self.hostname, self.port, self.username, self.password))
|
||||
|
||||
def __eq__(self, other):
|
||||
return vars(self) == vars(other)
|
||||
|
||||
def __ne__(self, other):
|
||||
return not self == other
|
||||
|
||||
def __repr__(self):
|
||||
attrs = []
|
||||
for a in ['hostname', 'port', 'username', 'password']:
|
||||
v = getattr(self, a)
|
||||
if v:
|
||||
attrs.append((a, repr(v)))
|
||||
values = ', '.join(['%s=%s' % i for i in attrs])
|
||||
return '<TransportHost ' + values + '>'
|
||||
|
||||
|
||||
class TransportURL(object):
|
||||
|
||||
"""A parsed transport URL.
|
||||
|
||||
Transport URLs take the form::
|
||||
|
||||
transport://user:pass@host1:port[,hostN:portN]/virtual_host
|
||||
|
||||
i.e. the scheme selects the transport driver, the netloc may contain multiple
|
||||
hosts, and the path part is a "virtual host" partition path.
|
||||
|
||||
:param conf: a ConfigOpts instance
|
||||
:type conf: oslo.config.cfg.ConfigOpts
|
||||
:param transport: a transport name for example 'rabbit' or 'qpid'
|
||||
:type transport: str
|
||||
:param virtual_host: a virtual host path for example '/'
|
||||
:type virtual_host: str
|
||||
:param hosts: a list of TransportHost objects
|
||||
:type hosts: list
|
||||
:param aliases: A map of transport alias to transport name
|
||||
:type aliases: dict
|
||||
"""
|
||||
|
||||
def __init__(self, conf, transport=None, virtual_host=None, hosts=None,
|
||||
aliases=None):
|
||||
self.conf = conf
|
||||
self.conf.register_opts(_transport_opts)
|
||||
self._transport = transport
|
||||
self.virtual_host = virtual_host
|
||||
if hosts is None:
|
||||
self.hosts = []
|
||||
else:
|
||||
self.hosts = hosts
|
||||
if aliases is None:
|
||||
self.aliases = {}
|
||||
else:
|
||||
self.aliases = aliases
|
||||
|
||||
@property
|
||||
def transport(self):
|
||||
if self._transport is None:
|
||||
transport = self.conf.rpc_backend
|
||||
else:
|
||||
transport = self._transport
|
||||
return self.aliases.get(transport, transport)
|
||||
|
||||
@transport.setter
|
||||
def transport(self, value):
|
||||
self._transport = value
|
||||
|
||||
def __hash__(self):
|
||||
return hash((tuple(self.hosts), self.transport, self.virtual_host))
|
||||
|
||||
def __eq__(self, other):
|
||||
return (self.transport == other.transport and
|
||||
self.virtual_host == other.virtual_host and
|
||||
self.hosts == other.hosts)
|
||||
|
||||
def __ne__(self, other):
|
||||
return not self == other
|
||||
|
||||
def __repr__(self):
|
||||
attrs = []
|
||||
for a in ['transport', 'virtual_host', 'hosts']:
|
||||
v = getattr(self, a)
|
||||
if v:
|
||||
attrs.append((a, repr(v)))
|
||||
values = ', '.join(['%s=%s' % i for i in attrs])
|
||||
return '<TransportURL ' + values + '>'
|
||||
|
||||
def __str__(self):
|
||||
netlocs = []
|
||||
|
||||
for host in self.hosts:
|
||||
username = host.username
|
||||
password = host.password
|
||||
hostname = host.hostname
|
||||
port = host.port
|
||||
|
||||
# Starting place for the network location
|
||||
netloc = ''
|
||||
|
||||
# Build the username and password portion of the transport URL
|
||||
if username is not None or password is not None:
|
||||
if username is not None:
|
||||
netloc += parse.quote(username, '')
|
||||
if password is not None:
|
||||
netloc += ':%s' % parse.quote(password, '')
|
||||
netloc += '@'
|
||||
|
||||
# Build the network location portion of the transport URL
|
||||
if hostname:
|
||||
if ':' in hostname:
|
||||
netloc += '[%s]' % hostname
|
||||
else:
|
||||
netloc += hostname
|
||||
if port is not None:
|
||||
netloc += ':%d' % port
|
||||
|
||||
netlocs.append(netloc)
|
||||
|
||||
# Assemble the transport URL
|
||||
url = '%s://%s/' % (self.transport, ','.join(netlocs))
|
||||
|
||||
if self.virtual_host:
|
||||
url += parse.quote(self.virtual_host)
|
||||
|
||||
return url
|
||||
|
||||
@classmethod
|
||||
def parse(cls, conf, url, aliases=None):
|
||||
"""Parse an url.
|
||||
|
||||
Assuming a URL takes the form of::
|
||||
|
||||
transport://user:pass@host1:port[,hostN:portN]/virtual_host
|
||||
|
||||
then parse the URL and return a TransportURL object.
|
||||
|
||||
The netloc is parsed in the following sequence:
|
||||
|
||||
* It is first split by ',' in order to support multiple hosts
|
||||
* Username and password should be specified for each host; if they are
|
||||
not given for a host, they will be omitted::
|
||||
|
||||
user:pass@host1:port1,host2:port2
|
||||
|
||||
[
|
||||
{"username": "user", "password": "pass", "host": "host1:port1"},
|
||||
{"host": "host2:port2"}
|
||||
]
|
||||
|
||||
:param conf: a ConfigOpts instance
|
||||
:type conf: oslo.config.cfg.ConfigOpts
|
||||
:param url: The URL to parse
|
||||
:type url: str
|
||||
:param aliases: A map of transport alias to transport name
|
||||
:type aliases: dict
|
||||
:returns: A TransportURL
|
||||
"""
|
||||
if not url:
|
||||
return cls(conf, aliases=aliases)
|
||||
|
||||
if not isinstance(url, six.string_types):
|
||||
raise InvalidTransportURL(url, 'Wrong URL type')
|
||||
|
||||
url = parse.urlparse(url)
|
||||
|
||||
# Make sure there's not a query string; that could identify
|
||||
# requirements we can't comply with (for example ssl), so reject it if
|
||||
# it's present
|
||||
if '?' in url.path or url.query:
|
||||
raise InvalidTransportURL(url.geturl(),
|
||||
"Cannot comply with query string in "
|
||||
"transport URL")
|
||||
|
||||
virtual_host = None
|
||||
if url.path.startswith('/'):
|
||||
virtual_host = url.path[1:]
|
||||
|
||||
hosts = []
|
||||
|
||||
username = password = ''
|
||||
for host in url.netloc.split(','):
|
||||
if not host:
|
||||
continue
|
||||
|
||||
hostname = host
|
||||
username = password = port = None
|
||||
|
||||
if '@' in host:
|
||||
username, hostname = host.split('@', 1)
|
||||
if ':' in username:
|
||||
username, password = username.split(':', 1)
|
||||
|
||||
if not hostname:
|
||||
hostname = None
|
||||
elif hostname.startswith('['):
|
||||
# Find the closing ']' and extract the hostname
|
||||
host_end = hostname.find(']')
|
||||
if host_end < 0:
|
||||
# NOTE(Vek): Identical to what Python 2.7's
|
||||
# urlparse.urlparse() raises in this case
|
||||
raise ValueError("Invalid IPv6 URL")
|
||||
|
||||
port_text = hostname[host_end:]
|
||||
hostname = hostname[1:host_end]
|
||||
|
||||
# Now we need the port; this is compliant with how urlparse
|
||||
# parses the port data
|
||||
port = None
|
||||
if ':' in port_text:
|
||||
port = int(port_text.split(':', 1)[1])
|
||||
elif ':' in hostname:
|
||||
hostname, port = hostname.split(':', 1)
|
||||
port = int(port)
|
||||
|
||||
hosts.append(TransportHost(hostname=hostname,
|
||||
port=port,
|
||||
username=username,
|
||||
password=password))
|
||||
|
||||
return cls(conf, url.scheme, virtual_host, hosts, aliases)
|
||||
from oslo_messaging.transport import * # noqa
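A short sketch of the two usual ways to obtain a transport, plus how
TransportURL.parse() decomposes a URL (hostnames and credentials are
illustrative)::

    from oslo.config import cfg
    import oslo_messaging

    # From an explicit URL ...
    transport = oslo_messaging.get_transport(
        cfg.CONF, 'rabbit://me:passwd@host:5672/virtual_host')

    # ... or from the transport_url / rpc_backend configuration options.
    transport = oslo_messaging.get_transport(cfg.CONF)

    # TransportURL.parse() exposes the decomposed form.
    url = oslo_messaging.TransportURL.parse(
        cfg.CONF, 'rabbit://user:pass@host1:5672,host2:5672/vhost')
    assert url.transport == 'rabbit'
    assert url.virtual_host == 'vhost'
    assert len(url.hosts) == 2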
|
||||
|
23
oslo_messaging/__init__.py
Normal file
@ -0,0 +1,23 @@
|
||||
|
||||
# Copyright 2013 Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from .exceptions import *
|
||||
from .localcontext import *
|
||||
from .notify import *
|
||||
from .rpc import *
|
||||
from .serializer import *
|
||||
from .server import *
|
||||
from .target import *
|
||||
from .transport import *
|
@ -22,8 +22,8 @@ import logging
|
||||
import sys
|
||||
|
||||
from oslo.config import cfg
|
||||
from oslo.messaging._drivers import impl_zmq
|
||||
from oslo.messaging._executors import base # FIXME(markmc)
|
||||
from oslo_messaging._drivers import impl_zmq
|
||||
from oslo_messaging._executors import base # FIXME(markmc)
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.register_opts(impl_zmq.zmq_opts)
|
@ -30,9 +30,9 @@ import uuid
|
||||
import six
|
||||
|
||||
from oslo.config import cfg
|
||||
from oslo.messaging._drivers import common as rpc_common
|
||||
from oslo.messaging._drivers import pool
|
||||
from oslo.utils import strutils
|
||||
from oslo_messaging._drivers import common as rpc_common
|
||||
from oslo_messaging._drivers import pool
|
||||
|
||||
amqp_opts = [
|
||||
cfg.BoolOpt('amqp_durable_queues',
|
@ -21,12 +21,12 @@ import uuid
|
||||
|
||||
from six import moves
|
||||
|
||||
from oslo import messaging
|
||||
from oslo.messaging._drivers import amqp as rpc_amqp
|
||||
from oslo.messaging._drivers import base
|
||||
from oslo.messaging._drivers import common as rpc_common
|
||||
from oslo.messaging._i18n import _
|
||||
from oslo.messaging._i18n import _LI
|
||||
import oslo_messaging
|
||||
from oslo_messaging._drivers import amqp as rpc_amqp
|
||||
from oslo_messaging._drivers import base
|
||||
from oslo_messaging._drivers import common as rpc_common
|
||||
from oslo_messaging._i18n import _
|
||||
from oslo_messaging._i18n import _LI
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
@ -143,8 +143,9 @@ class ReplyWaiters(object):
|
||||
try:
|
||||
return self._queues[msg_id].get(block=True, timeout=timeout)
|
||||
except moves.queue.Empty:
|
||||
raise messaging.MessagingTimeout('Timed out waiting for a reply '
|
||||
'to message ID %s' % msg_id)
|
||||
raise oslo_messaging.MessagingTimeout(
|
||||
'Timed out waiting for a reply '
|
||||
'to message ID %s' % msg_id)
|
||||
|
||||
def check(self, msg_id):
|
||||
try:
|
||||
@ -207,7 +208,7 @@ class ReplyWaiter(object):
|
||||
|
||||
@staticmethod
|
||||
def _raise_timeout_exception(msg_id):
|
||||
raise messaging.MessagingTimeout(
|
||||
raise oslo_messaging.MessagingTimeout(
|
||||
_('Timed out waiting for a reply to message ID %s.') % msg_id)
|
||||
|
||||
def _process_reply(self, data):
|
@ -17,7 +17,7 @@ import abc
|
||||
|
||||
import six
|
||||
|
||||
from oslo.messaging import exceptions
|
||||
from oslo_messaging import exceptions
|
||||
|
||||
|
||||
class TransportDriverError(exceptions.MessagingException):
|
@ -23,10 +23,10 @@ import traceback
|
||||
|
||||
import six
|
||||
|
||||
from oslo import messaging
|
||||
from oslo.messaging._i18n import _
|
||||
from oslo.messaging import _utils as utils
|
||||
from oslo.serialization import jsonutils
|
||||
import oslo_messaging
|
||||
from oslo_messaging._i18n import _
|
||||
from oslo_messaging import _utils as utils
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
@ -211,7 +211,7 @@ def deserialize_remote_exception(data, allowed_remote_exmods):
|
||||
# NOTE(ameade): We DO NOT want to allow just any module to be imported, in
|
||||
# order to prevent arbitrary code execution.
|
||||
if module != _EXCEPTIONS_MODULE and module not in allowed_remote_exmods:
|
||||
return messaging.RemoteError(name, failure.get('message'), trace)
|
||||
return oslo_messaging.RemoteError(name, failure.get('message'), trace)
|
||||
|
||||
try:
|
||||
__import__(module)
|
||||
@ -222,7 +222,7 @@ def deserialize_remote_exception(data, allowed_remote_exmods):
|
||||
|
||||
failure = klass(*failure.get('args', []), **failure.get('kwargs', {}))
|
||||
except (AttributeError, TypeError, ImportError):
|
||||
return messaging.RemoteError(name, failure.get('message'), trace)
|
||||
return oslo_messaging.RemoteError(name, failure.get('message'), trace)
|
||||
|
||||
ex_type = type(failure)
|
||||
str_override = lambda self: message
|
@ -20,8 +20,8 @@ import time
|
||||
|
||||
from six import moves
|
||||
|
||||
from oslo import messaging
|
||||
from oslo.messaging._drivers import base
|
||||
import oslo_messaging
|
||||
from oslo_messaging._drivers import base
|
||||
|
||||
|
||||
class FakeIncomingMessage(base.IncomingMessage):
|
||||
@ -199,7 +199,7 @@ class FakeDriver(base.BaseDriver):
|
||||
else:
|
||||
return reply
|
||||
except moves.queue.Empty:
|
||||
raise messaging.MessagingTimeout(
|
||||
raise oslo_messaging.MessagingTimeout(
|
||||
'No reply on topic %s' % target.topic)
|
||||
|
||||
return None
|
||||
@ -218,17 +218,21 @@ class FakeDriver(base.BaseDriver):
|
||||
def listen(self, target):
|
||||
exchange = target.exchange or self._default_exchange
|
||||
listener = FakeListener(self, self._exchange_manager,
|
||||
[messaging.Target(topic=target.topic,
|
||||
server=target.server,
|
||||
exchange=exchange),
|
||||
messaging.Target(topic=target.topic,
|
||||
exchange=exchange)])
|
||||
[oslo_messaging.Target(
|
||||
topic=target.topic,
|
||||
server=target.server,
|
||||
exchange=exchange),
|
||||
oslo_messaging.Target(
|
||||
topic=target.topic,
|
||||
exchange=exchange)])
|
||||
return listener
|
||||
|
||||
def listen_for_notifications(self, targets_and_priorities, pool):
|
||||
targets = [messaging.Target(topic='%s.%s' % (target.topic, priority),
|
||||
exchange=target.exchange)
|
||||
for target, priority in targets_and_priorities]
|
||||
targets = [
|
||||
oslo_messaging.Target(
|
||||
topic='%s.%s' % (target.topic, priority),
|
||||
exchange=target.exchange)
|
||||
for target, priority in targets_and_priorities]
|
||||
listener = FakeListener(self, self._exchange_manager, targets, pool)
|
||||
|
||||
return listener
|
@ -23,14 +23,14 @@ import time
|
||||
import six
|
||||
|
||||
from oslo.config import cfg
|
||||
from oslo.messaging._drivers import amqp as rpc_amqp
|
||||
from oslo.messaging._drivers import amqpdriver
|
||||
from oslo.messaging._drivers import common as rpc_common
|
||||
from oslo.messaging._i18n import _
|
||||
from oslo.messaging import exceptions
|
||||
from oslo.serialization import jsonutils
|
||||
from oslo.utils import importutils
|
||||
from oslo.utils import netutils
|
||||
from oslo_messaging._drivers import amqp as rpc_amqp
|
||||
from oslo_messaging._drivers import amqpdriver
|
||||
from oslo_messaging._drivers import common as rpc_common
|
||||
from oslo_messaging._i18n import _
|
||||
from oslo_messaging import exceptions
|
||||
|
||||
qpid_codec = importutils.try_import("qpid.codec010")
|
||||
qpid_messaging = importutils.try_import("qpid.messaging")
|
||||
@ -594,7 +594,7 @@ class Connection(object):
|
||||
LOG.warn("Process forked! "
|
||||
"This can result in unpredictable behavior. "
|
||||
"See: http://docs.openstack.org/developer/"
|
||||
"oslo.messaging/transport.html")
|
||||
"oslo_messaging/transport.html")
|
||||
self._initial_pid = current_pid
|
||||
|
||||
while True:
|
@ -30,13 +30,13 @@ import six
|
||||
from six.moves.urllib import parse
|
||||
|
||||
from oslo.config import cfg
|
||||
from oslo.messaging._drivers import amqp as rpc_amqp
|
||||
from oslo.messaging._drivers import amqpdriver
|
||||
from oslo.messaging._drivers import common as rpc_common
|
||||
from oslo.messaging._i18n import _
|
||||
from oslo.messaging._i18n import _LI
|
||||
from oslo.messaging import exceptions
|
||||
from oslo.utils import netutils
|
||||
from oslo_messaging._drivers import amqp as rpc_amqp
|
||||
from oslo_messaging._drivers import amqpdriver
|
||||
from oslo_messaging._drivers import common as rpc_common
|
||||
from oslo_messaging._i18n import _
|
||||
from oslo_messaging._i18n import _LI
|
||||
from oslo_messaging import exceptions
|
||||
|
||||
|
||||
rabbit_opts = [
|
||||
@ -104,7 +104,7 @@ rabbit_opts = [
|
||||
'If you change this option, you must wipe the '
|
||||
'RabbitMQ database.'),
|
||||
|
||||
# NOTE(sileht): deprecated option since oslo.messaging 1.5.0,
|
||||
# NOTE(sileht): deprecated option since oslo_messaging 1.5.0,
|
||||
cfg.BoolOpt('fake_rabbit',
|
||||
default=False,
|
||||
help='Deprecated, use rpc_backend=kombu+memory or '
|
||||
@ -583,7 +583,7 @@ class Connection(object):
|
||||
LOG.warn("Process forked after connection established! "
|
||||
"This can result in unpredictable behavior. "
|
||||
"See: http://docs.openstack.org/developer/"
|
||||
"oslo.messaging/transport.html")
|
||||
"oslo_messaging/transport.html")
|
||||
self._initial_pid = current_pid
|
||||
|
||||
if retry is None:
|
@ -29,10 +29,10 @@ import six
|
||||
from six import moves
|
||||
|
||||
from oslo.config import cfg
|
||||
from oslo.messaging._drivers import base
|
||||
from oslo.messaging._drivers import common as rpc_common
|
||||
from oslo.messaging._executors import base as executor_base # FIXME(markmc)
|
||||
from oslo.messaging._i18n import _, _LE
|
||||
from oslo_messaging._drivers import base
|
||||
from oslo_messaging._drivers import common as rpc_common
|
||||
from oslo_messaging._executors import base as executor_base # FIXME(markmc)
|
||||
from oslo_messaging._i18n import _, _LE
|
||||
from oslo.serialization import jsonutils
|
||||
from oslo.utils import excutils
|
||||
from oslo.utils import importutils
|
||||
@ -56,7 +56,7 @@ zmq_opts = [
|
||||
# The module.Class to use for matchmaking.
|
||||
cfg.StrOpt(
|
||||
'rpc_zmq_matchmaker',
|
||||
default=('oslo.messaging._drivers.'
|
||||
default=('oslo_messaging._drivers.'
|
||||
'matchmaker.MatchMakerLocalhost'),
|
||||
help='MatchMaker driver.',
|
||||
),
|
||||
@ -124,7 +124,14 @@ class ZmqSocket(object):
|
||||
# Enable IPv6-support in libzmq.
|
||||
# When IPv6 is enabled, a socket will connect to, or accept
|
||||
# connections from, both IPv4 and IPv6 hosts.
|
||||
self.sock.ipv6 = True
|
||||
try:
|
||||
self.sock.ipv6 = True
|
||||
except AttributeError:
|
||||
# NOTE(dhellmann): Sometimes the underlying library does
|
||||
# not recognize the IPV6 option. There's nothing we can
|
||||
# really do in that case, so ignore the error and keep
|
||||
# trying to work.
|
||||
pass
|
||||
|
||||
self.addr = addr
|
||||
self.type = zmq_type
|
@ -22,7 +22,7 @@ import logging
|
||||
import eventlet
|
||||
|
||||
from oslo.config import cfg
|
||||
from oslo.messaging._i18n import _
|
||||
from oslo_messaging._i18n import _
|
||||
|
||||
matchmaker_opts = [
|
||||
cfg.IntOpt('matchmaker_heartbeat_freq',
|
@ -17,8 +17,8 @@ return keys for direct exchanges, per (approximate) AMQP parlance.
|
||||
"""
|
||||
|
||||
from oslo.config import cfg
|
||||
from oslo.messaging._drivers import matchmaker as mm_common
|
||||
from oslo.utils import importutils
|
||||
from oslo_messaging._drivers import matchmaker as mm_common
|
||||
|
||||
redis = importutils.try_import('redis')
|
||||
|
@ -21,8 +21,8 @@ import json
|
||||
import logging
|
||||
|
||||
from oslo.config import cfg
|
||||
from oslo.messaging._drivers import matchmaker as mm
|
||||
from oslo.messaging._i18n import _
|
||||
from oslo_messaging._drivers import matchmaker as mm
|
||||
from oslo_messaging._i18n import _
|
||||
|
||||
matchmaker_opts = [
|
||||
# Matchmaker ring file
|
@ -34,9 +34,9 @@ import pyngus
|
||||
from six import moves
|
||||
|
||||
from oslo.config import cfg
|
||||
from oslo.messaging._drivers.protocols.amqp import eventloop
|
||||
from oslo.messaging._drivers.protocols.amqp import opts
|
||||
from oslo.messaging import transport
|
||||
from oslo_messaging._drivers.protocols.amqp import eventloop
|
||||
from oslo_messaging._drivers.protocols.amqp import opts
|
||||
from oslo_messaging import transport
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
@ -28,12 +28,12 @@ import time
|
||||
import proton
|
||||
from six import moves
|
||||
|
||||
from oslo import messaging
|
||||
from oslo.messaging._drivers import base
|
||||
from oslo.messaging._drivers import common
|
||||
from oslo.messaging._drivers.protocols.amqp import controller
|
||||
from oslo.messaging import target as messaging_target
|
||||
from oslo.serialization import jsonutils
|
||||
import oslo_messaging
|
||||
from oslo_messaging._drivers import base
|
||||
from oslo_messaging._drivers import common
|
||||
from oslo_messaging._drivers.protocols.amqp import controller
|
||||
from oslo_messaging import target as messaging_target
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
@ -67,7 +67,8 @@ class SendTask(controller.Task):
|
||||
try:
|
||||
return self._reply_queue.get(timeout=timeout)
|
||||
except moves.queue.Empty:
|
||||
raise messaging.MessagingTimeout('Timed out waiting for a reply')
|
||||
raise oslo_messaging.MessagingTimeout(
|
||||
'Timed out waiting for a reply')
|
||||
|
||||
|
||||
class ListenTask(controller.Task):
|
@ -15,8 +15,8 @@
|
||||
|
||||
import logging
|
||||
|
||||
from oslo.messaging._executors import base
|
||||
from oslo.messaging._i18n import _
|
||||
from oslo_messaging._executors import base
|
||||
from oslo_messaging._i18n import _
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
@ -21,9 +21,9 @@ from eventlet.green import threading as greenthreading
|
||||
from eventlet import greenpool
|
||||
import greenlet
|
||||
|
||||
from oslo.messaging._executors import base
|
||||
from oslo.messaging import localcontext
|
||||
from oslo.utils import excutils
|
||||
from oslo_messaging._executors import base
|
||||
from oslo_messaging import localcontext
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
@ -22,8 +22,8 @@ import threading
|
||||
from concurrent import futures
|
||||
import six
|
||||
|
||||
from oslo.messaging._executors import base
|
||||
from oslo.utils import excutils
|
||||
from oslo_messaging._executors import base
|
||||
|
||||
|
||||
class ThreadExecutor(base.PooledExecutorBase):
|
78
oslo_messaging/conffixture.py
Normal file
@ -0,0 +1,78 @@
|
||||
|
||||
# Copyright 2013 Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
__all__ = ['ConfFixture']
|
||||
|
||||
import sys
|
||||
|
||||
import fixtures
|
||||
|
||||
|
||||
def _import_opts(conf, module, opts):
|
||||
__import__(module)
|
||||
conf.register_opts(getattr(sys.modules[module], opts))
|
||||
|
||||
|
||||
class ConfFixture(fixtures.Fixture):
|
||||
|
||||
"""Tweak configuration options for unit testing.
|
||||
|
||||
oslo.messaging registers a number of configuration options, but rather than
|
||||
directly referencing those options, users of the API should use this
|
||||
interface for querying and overriding certain configuration options.
|
||||
|
||||
An example usage::
|
||||
|
||||
self.messaging_conf = self.useFixture(messaging.ConfFixture(cfg.CONF))
|
||||
self.messaging_conf.transport_driver = 'fake'
|
||||
|
||||
:param conf: a ConfigOpts instance
|
||||
:type conf: oslo.config.cfg.ConfigOpts
|
||||
"""
|
||||
|
||||
def __init__(self, conf):
|
||||
self.conf = conf
|
||||
_import_opts(self.conf,
|
||||
'oslo_messaging._drivers.impl_rabbit', 'rabbit_opts')
|
||||
_import_opts(self.conf,
|
||||
'oslo_messaging._drivers.impl_qpid', 'qpid_opts')
|
||||
_import_opts(self.conf,
|
||||
'oslo_messaging._drivers.amqp', 'amqp_opts')
|
||||
_import_opts(self.conf, 'oslo_messaging.rpc.client', '_client_opts')
|
||||
_import_opts(self.conf, 'oslo_messaging.transport', '_transport_opts')
|
||||
_import_opts(self.conf,
|
||||
'oslo_messaging.notify.notifier', '_notifier_opts')
|
||||
|
||||
def setUp(self):
|
||||
super(ConfFixture, self).setUp()
|
||||
self.addCleanup(self.conf.reset)
|
||||
|
||||
@property
|
||||
def transport_driver(self):
|
||||
"""The transport driver - for example 'rabbit', 'qpid' or 'fake'."""
|
||||
return self.conf.rpc_backend
|
||||
|
||||
@transport_driver.setter
|
||||
def transport_driver(self, value):
|
||||
self.conf.set_override('rpc_backend', value)
|
||||
|
||||
@property
|
||||
def response_timeout(self):
|
||||
"""Default number of seconds to wait for a response from a call."""
|
||||
return self.conf.rpc_response_timeout
|
||||
|
||||
@response_timeout.setter
|
||||
def response_timeout(self, value):
|
||||
self.conf.set_override('rpc_response_timeout', value)
|
40
oslo_messaging/exceptions.py
Normal file
@ -0,0 +1,40 @@
|
||||
|
||||
# Copyright 2013 Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
__all__ = ['MessagingException', 'MessagingTimeout', 'MessageDeliveryFailure',
|
||||
'InvalidTarget']
|
||||
|
||||
import six
|
||||
|
||||
|
||||
class MessagingException(Exception):
|
||||
"""Base class for exceptions."""
|
||||
|
||||
|
||||
class MessagingTimeout(MessagingException):
|
||||
"""Raised if message sending times out."""
|
||||
|
||||
|
||||
class MessageDeliveryFailure(MessagingException):
|
||||
"""Raised if message sending failed after the asked retry."""
|
||||
|
||||
|
||||
class InvalidTarget(MessagingException, ValueError):
|
||||
"""Raised if a target does not meet certain pre-conditions."""
|
||||
|
||||
def __init__(self, msg, target):
|
||||
msg = msg + ":" + six.text_type(target)
|
||||
super(InvalidTarget, self).__init__(msg)
|
||||
self.target = target
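A sketch of how callers typically distinguish these exceptions around a
blocking call (the client object and method name are assumed to exist)::

    import oslo_messaging

    try:
        result = client.call(ctxt, 'ping')
    except oslo_messaging.MessagingTimeout:
        # No reply arrived within the configured timeout.
        result = None
    except oslo_messaging.MessageDeliveryFailure:
        # The driver gave up after the requested number of retries.
        result = None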
|
55
oslo_messaging/localcontext.py
Normal file
@ -0,0 +1,55 @@
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

__all__ = [
    'get_local_context',
    'set_local_context',
    'clear_local_context',
]

import threading
import uuid

_KEY = '_%s_%s' % (__name__.replace('.', '_'), uuid.uuid4().hex)
_STORE = threading.local()


def get_local_context(ctxt):
    """Retrieve the RPC endpoint request context for the current thread.

    This method allows any code running in the context of a dispatched RPC
    endpoint method to retrieve the context for this request.

    This is commonly used for logging so that, for example, you can include
    the request ID, user and tenant in every message logged from a RPC
    endpoint method.

    :returns: the context for the request dispatched in the current thread
    """
    return getattr(_STORE, _KEY, None)


def set_local_context(ctxt):
    """Set the request context for the current thread.

    :param ctxt: a deserialized request context
    :type ctxt: dict
    """
    setattr(_STORE, _KEY, ctxt)


def clear_local_context():
    """Clear the request context for the current thread."""
    delattr(_STORE, _KEY)
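A hedged sketch of pulling the dispatched request context into log records; the 'request_id' key is illustrative and depends on how the application serializes its context::

    import logging

    from oslo_messaging import localcontext


    class ContextFilter(logging.Filter):
        def filter(self, record):
            # Returns the context set by the dispatcher for this thread, if any.
            ctxt = localcontext.get_local_context(None) or {}
            record.request_id = ctxt.get('request_id', '-')
            return True


    logging.getLogger().addFilter(ContextFilter())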
27 oslo_messaging/notify/__init__.py Normal file
@ -0,0 +1,27 @@
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

__all__ = ['Notifier',
           'LoggingNotificationHandler',
           'get_notification_listener',
           'NotificationResult',
           'PublishErrorsHandler',
           'LoggingErrorNotificationHandler']

from .notifier import *
from .listener import *
from .log_handler import *
from .logger import *
from .dispatcher import NotificationResult
@ -17,14 +17,19 @@
import logging

from oslo.messaging.notify import notifier
from oslo.serialization import jsonutils
from oslo_messaging.notify import notifier


class LogDriver(notifier._Driver):

    "Publish notifications via Python logging infrastructure."

    # NOTE(dhellmann): For backwards-compatibility with configurations
    # that may have modified the settings for this logger using a
    # configuration file, we keep the name
    # 'oslo.messaging.notification' even though the package is now
    # 'oslo_messaging'.
    LOGGER_BASE = 'oslo.messaging.notification'

    def notify(self, ctxt, message, priority, retry):
@ -17,8 +17,8 @@
import logging

from oslo import messaging
from oslo.messaging.notify import notifier
import oslo_messaging
from oslo_messaging.notify import notifier

LOG = logging.getLogger(__name__)

@ -41,7 +41,7 @@ class MessagingDriver(notifier._Driver):
    def notify(self, ctxt, message, priority, retry):
        priority = priority.lower()
        for topic in self.topics:
            target = messaging.Target(topic='%s.%s' % (topic, priority))
            target = oslo_messaging.Target(topic='%s.%s' % (topic, priority))
            try:
                self.transport._send_notification(target, ctxt, message,
                                                  version=self.version,
@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.

from oslo.messaging.notify import notifier
from oslo_messaging.notify import notifier


class NoOpDriver(notifier._Driver):
@ -21,8 +21,8 @@ from stevedore import dispatch
import yaml

from oslo.config import cfg
from oslo.messaging._i18n import _
from oslo.messaging.notify import notifier
from oslo_messaging._i18n import _
from oslo_messaging.notify import notifier


LOG = logging.getLogger(__name__)
@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.

from oslo.messaging.notify import notifier
from oslo_messaging.notify import notifier

NOTIFICATIONS = []
135 oslo_messaging/notify/dispatcher.py Normal file
@ -0,0 +1,135 @@
|
||||
# Copyright 2011 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
# Copyright 2013 eNovance
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import contextlib
|
||||
import itertools
|
||||
import logging
|
||||
import sys
|
||||
|
||||
from oslo_messaging import localcontext
|
||||
from oslo_messaging import serializer as msg_serializer
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
PRIORITIES = ['audit', 'debug', 'info', 'warn', 'error', 'critical', 'sample']
|
||||
|
||||
|
||||
class NotificationResult(object):
|
||||
HANDLED = 'handled'
|
||||
REQUEUE = 'requeue'
|
||||
|
||||
|
||||
class NotificationDispatcher(object):
|
||||
"""A message dispatcher which understands Notification messages.
|
||||
|
||||
A MessageHandlingServer is constructed by passing a callable dispatcher
|
||||
which is invoked with context and message dictionaries each time a message
|
||||
is received.
|
||||
|
||||
NotificationDispatcher is one such dispatcher, which passes a raw notification
|
||||
message to the endpoints
|
||||
"""
|
||||
|
||||
def __init__(self, targets, endpoints, serializer, allow_requeue,
|
||||
pool=None):
|
||||
self.targets = targets
|
||||
self.endpoints = endpoints
|
||||
self.serializer = serializer or msg_serializer.NoOpSerializer()
|
||||
self.allow_requeue = allow_requeue
|
||||
self.pool = pool
|
||||
|
||||
self._callbacks_by_priority = {}
|
||||
for endpoint, prio in itertools.product(endpoints, PRIORITIES):
|
||||
if hasattr(endpoint, prio):
|
||||
method = getattr(endpoint, prio)
|
||||
self._callbacks_by_priority.setdefault(prio, []).append(method)
|
||||
|
||||
priorities = self._callbacks_by_priority.keys()
|
||||
self._targets_priorities = set(itertools.product(self.targets,
|
||||
priorities))
|
||||
|
||||
def _listen(self, transport):
|
||||
return transport._listen_for_notifications(self._targets_priorities,
|
||||
pool=self.pool)
|
||||
|
||||
@contextlib.contextmanager
|
||||
def __call__(self, incoming, executor_callback=None):
|
||||
result_wrapper = []
|
||||
|
||||
yield lambda: result_wrapper.append(
|
||||
self._dispatch_and_handle_error(incoming, executor_callback))
|
||||
|
||||
if result_wrapper[0] == NotificationResult.HANDLED:
|
||||
incoming.acknowledge()
|
||||
else:
|
||||
incoming.requeue()
|
||||
|
||||
def _dispatch_and_handle_error(self, incoming, executor_callback):
|
||||
"""Dispatch a notification message to the appropriate endpoint method.
|
||||
|
||||
:param incoming: the incoming notification message
|
||||
:type incoming: IncomingMessage
|
||||
"""
|
||||
try:
|
||||
return self._dispatch(incoming.ctxt, incoming.message,
|
||||
executor_callback)
|
||||
except Exception:
|
||||
# sys.exc_info() is deleted by LOG.exception().
|
||||
exc_info = sys.exc_info()
|
||||
LOG.error('Exception during message handling',
|
||||
exc_info=exc_info)
|
||||
return NotificationResult.HANDLED
|
||||
|
||||
def _dispatch(self, ctxt, message, executor_callback=None):
|
||||
"""Dispatch an RPC message to the appropriate endpoint method.
|
||||
|
||||
:param ctxt: the request context
|
||||
:type ctxt: dict
|
||||
:param message: the message payload
|
||||
:type message: dict
|
||||
"""
|
||||
ctxt = self.serializer.deserialize_context(ctxt)
|
||||
|
||||
publisher_id = message.get('publisher_id')
|
||||
event_type = message.get('event_type')
|
||||
metadata = {
|
||||
'message_id': message.get('message_id'),
|
||||
'timestamp': message.get('timestamp')
|
||||
}
|
||||
priority = message.get('priority', '').lower()
|
||||
if priority not in PRIORITIES:
|
||||
LOG.warning('Unknown priority "%s"', priority)
|
||||
return
|
||||
|
||||
payload = self.serializer.deserialize_entity(ctxt,
|
||||
message.get('payload'))
|
||||
|
||||
for callback in self._callbacks_by_priority.get(priority, []):
|
||||
localcontext.set_local_context(ctxt)
|
||||
try:
|
||||
if executor_callback:
|
||||
ret = executor_callback(callback, ctxt, publisher_id,
|
||||
event_type, payload, metadata)
|
||||
else:
|
||||
ret = callback(ctxt, publisher_id, event_type, payload,
|
||||
metadata)
|
||||
ret = NotificationResult.HANDLED if ret is None else ret
|
||||
if self.allow_requeue and ret == NotificationResult.REQUEUE:
|
||||
return ret
|
||||
finally:
|
||||
localcontext.clear_local_context()
|
||||
return NotificationResult.HANDLED
|
137 oslo_messaging/notify/listener.py Normal file
@ -0,0 +1,137 @@
|
||||
# Copyright 2011 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
# Copyright 2013 eNovance
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""A notification listener exposes a number of endpoints, each of which
|
||||
contains a set of methods. Each method corresponds to a notification priority.
|
||||
|
||||
To create a notification listener, you supply a transport, list of targets and
|
||||
a list of endpoints.
|
||||
|
||||
A transport can be obtained simply by calling the get_transport() method::
|
||||
|
||||
transport = messaging.get_transport(conf)
|
||||
|
||||
which will load the appropriate transport driver according to the user's
|
||||
messaging configuration. See get_transport() for more details.
|
||||
|
||||
The target supplied when creating a notification listener expresses the topic
|
||||
and - optionally - the exchange to listen on. See Target for more details
|
||||
on these attributes.
|
||||
|
||||
A notification listener has start(), stop() and wait() methods to begin
|
||||
handling requests, stop handling requests and wait for all in-process
|
||||
requests to complete.
|
||||
|
||||
Each notification listener is associated with an executor which integrates the
|
||||
listener with a specific I/O handling framework. Currently, there are blocking
|
||||
and eventlet executors available.
|
||||
|
||||
A simple example of a notification listener with multiple endpoints might be::
|
||||
|
||||
from oslo.config import cfg
|
||||
import oslo_messaging
|
||||
|
||||
class NotificationEndpoint(object):
|
||||
def warn(self, ctxt, publisher_id, event_type, payload, metadata):
|
||||
do_something(payload)
|
||||
|
||||
class ErrorEndpoint(object):
|
||||
def error(self, ctxt, publisher_id, event_type, payload, metadata):
|
||||
do_something(payload)
|
||||
|
||||
transport = oslo_messaging.get_transport(cfg.CONF)
|
||||
targets = [
|
||||
oslo_messaging.Target(topic='notifications'),
|
||||
oslo_messaging.Target(topic='notifications_bis')
|
||||
]
|
||||
endpoints = [
|
||||
NotificationEndpoint(),
|
||||
ErrorEndpoint(),
|
||||
]
|
||||
pool = "listener-workers"
|
||||
server = oslo_messaging.get_notification_listener(transport, targets,
|
||||
endpoints, pool)
|
||||
server.start()
|
||||
server.wait()
|
||||
|
||||
A notifier sends a notification on a topic with a priority. The notification
listener will receive that notification if its topic is listed in one of the
targets and an endpoint implements a method named after the priority.
||||
|
||||
Parameters to endpoint methods are the request context supplied by the client,
|
||||
the publisher_id of the notification message, the event_type, the payload and
|
||||
metadata. The metadata parameter is a mapping containing a unique message_id
|
||||
and a timestamp.
|
||||
|
||||
By supplying a serializer object, a listener can deserialize a request context
|
||||
and arguments from - and serialize return values to - primitive types.
|
||||
|
||||
By supplying a pool name you can create multiple groups of listeners consuming
notifications, so that each group receives only one copy of each notification.
||||
|
||||
An endpoint method can explicitly return
|
||||
oslo_messaging.NotificationResult.HANDLED to acknowledge a message or
|
||||
oslo_messaging.NotificationResult.REQUEUE to requeue the message.
|
||||
|
||||
The message is acknowledged only if all endpoints either return
|
||||
oslo_messaging.NotificationResult.HANDLED or None.
|
||||
|
||||
Note that not all transport drivers implement support for requeueing. In order
|
||||
to use this feature, applications should assert that the feature is available
|
||||
by passing allow_requeue=True to get_notification_listener(). If the driver
|
||||
does not support requeueing, it will raise NotImplementedError at this point.
|
||||
|
||||
"""
|
||||
|
||||
from oslo_messaging.notify import dispatcher as notify_dispatcher
|
||||
from oslo_messaging import server as msg_server
|
||||
|
||||
|
||||
def get_notification_listener(transport, targets, endpoints,
|
||||
executor='blocking', serializer=None,
|
||||
allow_requeue=False, pool=None):
|
||||
"""Construct a notification listener
|
||||
|
||||
The executor parameter controls how incoming messages will be received and
|
||||
dispatched. By default, the most simple executor is used - the blocking
|
||||
executor.
|
||||
|
||||
If the eventlet executor is used, the threading and time libraries need to be
monkeypatched.
|
||||
|
||||
:param transport: the messaging transport
|
||||
:type transport: Transport
|
||||
:param targets: the exchanges and topics to listen on
|
||||
:type targets: list of Target
|
||||
:param endpoints: a list of endpoint objects
|
||||
:type endpoints: list
|
||||
:param executor: name of a message executor - for example
|
||||
'eventlet', 'blocking'
|
||||
:type executor: str
|
||||
:param serializer: an optional entity serializer
|
||||
:type serializer: Serializer
|
||||
:param allow_requeue: whether NotificationResult.REQUEUE support is needed
|
||||
:type allow_requeue: bool
|
||||
:param pool: the pool name
|
||||
:type pool: str
|
||||
:raises: NotImplementedError
|
||||
"""
|
||||
transport._require_driver_features(requeue=allow_requeue)
|
||||
dispatcher = notify_dispatcher.NotificationDispatcher(targets, endpoints,
|
||||
serializer,
|
||||
allow_requeue, pool)
|
||||
return msg_server.MessageHandlingServer(transport, dispatcher, executor)
|
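A hedged sketch, not part of this change, of an endpoint that requeues notifications it cannot process yet; 'backend_ready', 'transport' and 'targets' are assumed to exist, and requeueing needs a driver that supports it (hence allow_requeue=True)::

    import oslo_messaging


    class RetryableEndpoint(object):
        def error(self, ctxt, publisher_id, event_type, payload, metadata):
            if not backend_ready():
                # Put the message back on the queue for a later attempt.
                return oslo_messaging.NotificationResult.REQUEUE
            return oslo_messaging.NotificationResult.HANDLED


    listener = oslo_messaging.get_notification_listener(
        transport, targets, [RetryableEndpoint()], allow_requeue=True)
    listener.start()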
42 oslo_messaging/notify/log_handler.py Normal file
@ -0,0 +1,42 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging

from oslo.config import cfg


class LoggingErrorNotificationHandler(logging.Handler):
    def __init__(self, *args, **kwargs):
        # NOTE(dhellmann): Avoid a cyclical import by doing this one
        # at runtime.
        import oslo_messaging
        logging.Handler.__init__(self, *args, **kwargs)
        self._transport = oslo_messaging.get_transport(cfg.CONF)
        self._notifier = oslo_messaging.Notifier(
            self._transport,
            publisher_id='error.publisher')

    def emit(self, record):
        # NOTE(bnemec): Notifier registers this opt with the transport.
        if ('log' in self._transport.conf.notification_driver):
            # NOTE(lbragstad): If we detect that log is one of the
            # notification drivers, then return. This protects from infinite
            # recursion where something bad happens, it gets logged, the log
            # handler sends a notification, and the log_notifier sees the
            # notification and logs it.
            return
        self._notifier.error(None, 'error_notification',
                             dict(error=record.msg))


PublishErrorsHandler = LoggingErrorNotificationHandler
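A hedged sketch of wiring the handler above into the standard logging tree so that ERROR-level records are published as 'error_notification' events::

    import logging

    from oslo_messaging.notify import log_handler

    # Only records at ERROR and above trigger a notification.
    logging.getLogger().addHandler(
        log_handler.PublishErrorsHandler(logging.ERROR))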
81 oslo_messaging/notify/logger.py Normal file
@ -0,0 +1,81 @@
# Copyright 2013 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Driver for the Python logging package that sends log records as a notification.
"""
import logging

from oslo.config import cfg
from oslo_messaging.notify import notifier
from oslo_messaging import transport


class LoggingNotificationHandler(logging.Handler):
    """Handler for logging to the messaging notification system.

    Each time the application logs a message using the :py:mod:`logging`
    module, it will be sent as a notification. The severity used for the
    notification will be the same as the one used for the log record.

    This can be used in a Python logging configuration this way::

        [handler_notifier]
        class=oslo_messaging.LoggingNotificationHandler
        level=ERROR
        args=('qpid:///',)

    """

    CONF = cfg.CONF
    """Default configuration object used, subclass this class if you want to
    use another one.

    """

    def __init__(self, url, publisher_id=None, driver=None,
                 topic=None, serializer=None):
        self.notifier = notifier.Notifier(
            transport.get_transport(self.CONF, url),
            publisher_id, driver,
            topic,
            serializer() if serializer else None)
        logging.Handler.__init__(self)

    def emit(self, record):
        """Emit the log record to the messaging notification system.

        :param record: A log record to emit.

        """
        method = getattr(self.notifier, record.levelname.lower(), None)

        if not method:
            return

        method(None,
               'logrecord',
               {
                   'name': record.name,
                   'levelno': record.levelno,
                   'levelname': record.levelname,
                   'exc_info': record.exc_info,
                   'pathname': record.pathname,
                   'lineno': record.lineno,
                   'msg': record.getMessage(),
                   'funcName': record.funcName,
                   'thread': record.thread,
                   'processName': record.processName,
                   'process': record.process,
                   'extra': getattr(record, 'extra', None),
               })
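A hedged sketch of attaching the handler programmatically instead of through a logging config file; the transport URL and logger name are illustrative::

    import logging

    from oslo_messaging.notify import logger as notify_logger

    # Every record logged to 'myapp' is also emitted as a 'logrecord' notification.
    handler = notify_logger.LoggingNotificationHandler('rabbit://localhost:5672/')
    logging.getLogger('myapp').addHandler(handler)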
128 oslo_messaging/notify/middleware.py Normal file
@ -0,0 +1,128 @@
|
||||
# Copyright (c) 2013-2014 eNovance
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Send notifications on request
|
||||
|
||||
"""
|
||||
import logging
|
||||
import os.path
|
||||
import sys
|
||||
import traceback as tb
|
||||
|
||||
import six
|
||||
import webob.dec
|
||||
|
||||
from oslo.config import cfg
|
||||
from oslo.middleware import base
|
||||
import oslo_messaging
|
||||
from oslo_messaging._i18n import _LE
|
||||
from oslo_messaging import notify
|
||||
from oslo_messaging.openstack.common import context
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def log_and_ignore_error(fn):
|
||||
def wrapped(*args, **kwargs):
|
||||
try:
|
||||
return fn(*args, **kwargs)
|
||||
except Exception as e:
|
||||
LOG.exception(_LE('An exception occurred processing '
|
||||
'the API call: %s ') % e)
|
||||
return wrapped
|
||||
|
||||
|
||||
class RequestNotifier(base.Middleware):
|
||||
"""Send notification on request."""
|
||||
|
||||
@classmethod
|
||||
def factory(cls, global_conf, **local_conf):
|
||||
"""Factory method for paste.deploy."""
|
||||
conf = global_conf.copy()
|
||||
conf.update(local_conf)
|
||||
|
||||
def _factory(app):
|
||||
return cls(app, **conf)
|
||||
return _factory
|
||||
|
||||
def __init__(self, app, **conf):
|
||||
self.notifier = notify.Notifier(
|
||||
oslo_messaging.get_transport(cfg.CONF, conf.get('url')),
|
||||
publisher_id=conf.get('publisher_id',
|
||||
os.path.basename(sys.argv[0])))
|
||||
self.service_name = conf.get('service_name')
|
||||
self.ignore_req_list = [x.upper().strip() for x in
|
||||
conf.get('ignore_req_list', '').split(',')]
|
||||
super(RequestNotifier, self).__init__(app)
|
||||
|
||||
@staticmethod
|
||||
def environ_to_dict(environ):
|
||||
"""Following PEP 333, server variables are lower case, so don't
|
||||
include them.
|
||||
|
||||
"""
|
||||
return dict((k, v) for k, v in six.iteritems(environ)
|
||||
if k.isupper() and k != 'HTTP_X_AUTH_TOKEN')
|
||||
|
||||
@log_and_ignore_error
|
||||
def process_request(self, request):
|
||||
request.environ['HTTP_X_SERVICE_NAME'] = \
|
||||
self.service_name or request.host
|
||||
payload = {
|
||||
'request': self.environ_to_dict(request.environ),
|
||||
}
|
||||
|
||||
self.notifier.info(context.get_admin_context(),
|
||||
'http.request',
|
||||
payload)
|
||||
|
||||
@log_and_ignore_error
|
||||
def process_response(self, request, response,
|
||||
exception=None, traceback=None):
|
||||
payload = {
|
||||
'request': self.environ_to_dict(request.environ),
|
||||
}
|
||||
|
||||
if response:
|
||||
payload['response'] = {
|
||||
'status': response.status,
|
||||
'headers': response.headers,
|
||||
}
|
||||
|
||||
if exception:
|
||||
payload['exception'] = {
|
||||
'value': repr(exception),
|
||||
'traceback': tb.format_tb(traceback)
|
||||
}
|
||||
|
||||
self.notifier.info(context.get_admin_context(),
|
||||
'http.response',
|
||||
payload)
|
||||
|
||||
@webob.dec.wsgify
|
||||
def __call__(self, req):
|
||||
if req.method in self.ignore_req_list:
|
||||
return req.get_response(self.application)
|
||||
else:
|
||||
self.process_request(req)
|
||||
try:
|
||||
response = req.get_response(self.application)
|
||||
except Exception:
|
||||
exc_type, value, traceback = sys.exc_info()
|
||||
self.process_response(req, None, value, traceback)
|
||||
raise
|
||||
else:
|
||||
self.process_response(req, response)
|
||||
return response
|
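A hedged sketch of a paste.deploy stanza wiring the RequestNotifier middleware into a WSGI pipeline; the section name and option values are illustrative and may differ from what a given service ships::

    [filter:request_notifier]
    paste.filter_factory = oslo_messaging.notify.middleware:RequestNotifier.factory
    service_name = api
    ignore_req_list = GET,HEAD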
315 oslo_messaging/notify/notifier.py Normal file
@ -0,0 +1,315 @@
|
||||
|
||||
# Copyright 2011 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
# Copyright 2013 Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import abc
|
||||
import logging
|
||||
import uuid
|
||||
|
||||
import six
|
||||
from stevedore import named
|
||||
|
||||
from oslo.config import cfg
|
||||
from oslo.utils import timeutils
|
||||
from oslo_messaging import serializer as msg_serializer
|
||||
|
||||
_notifier_opts = [
|
||||
cfg.MultiStrOpt('notification_driver',
|
||||
default=[],
|
||||
help='Driver or drivers to handle sending notifications.'),
|
||||
cfg.ListOpt('notification_topics',
|
||||
default=['notifications', ],
|
||||
deprecated_name='topics',
|
||||
deprecated_group='rpc_notifier2',
|
||||
help='AMQP topic used for OpenStack notifications.'),
|
||||
]
|
||||
|
||||
_LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
|
||||
class _Driver(object):
|
||||
|
||||
def __init__(self, conf, topics, transport):
|
||||
self.conf = conf
|
||||
self.topics = topics
|
||||
self.transport = transport
|
||||
|
||||
@abc.abstractmethod
|
||||
def notify(self, ctxt, msg, priority, retry):
|
||||
pass
|
||||
|
||||
|
||||
class Notifier(object):
|
||||
|
||||
"""Send notification messages.
|
||||
|
||||
The Notifier class is used for sending notification messages over a
|
||||
messaging transport or other means.
|
||||
|
||||
Notification messages follow the following format::
|
||||
|
||||
{'message_id': six.text_type(uuid.uuid4()),
|
||||
'publisher_id': 'compute.host1',
|
||||
'timestamp': timeutils.utcnow(),
|
||||
'priority': 'WARN',
|
||||
'event_type': 'compute.create_instance',
|
||||
'payload': {'instance_id': 12, ... }}
|
||||
|
||||
A Notifier object can be instantiated with a transport object and a
|
||||
publisher ID:
|
||||
|
||||
notifier = messaging.Notifier(get_transport(CONF), 'compute')
|
||||
|
||||
and notifications are sent via drivers chosen with the notification_driver
|
||||
config option and on the topics chosen with the notification_topics config
|
||||
option.
|
||||
|
||||
Alternatively, a Notifier object can be instantiated with a specific
|
||||
driver or topic::
|
||||
|
||||
notifier = notifier.Notifier(RPC_TRANSPORT,
|
||||
'compute.host',
|
||||
driver='messaging',
|
||||
topic='notifications')
|
||||
|
||||
Notifier objects are relatively expensive to instantiate (mostly the cost
|
||||
of loading notification drivers), so it is possible to specialize a given
|
||||
Notifier object with a different publisher id using the prepare() method::
|
||||
|
||||
notifier = notifier.prepare(publisher_id='compute')
|
||||
notifier.info(ctxt, event_type, payload)
|
||||
"""
|
||||
|
||||
def __init__(self, transport, publisher_id=None,
|
||||
driver=None, topic=None,
|
||||
serializer=None, retry=None):
|
||||
"""Construct a Notifier object.
|
||||
|
||||
:param transport: the transport to use for sending messages
|
||||
:type transport: oslo_messaging.Transport
|
||||
:param publisher_id: field in notifications sent, for example
|
||||
'compute.host1'
|
||||
:type publisher_id: str
|
||||
:param driver: a driver to lookup from oslo_messaging.notify.drivers
|
||||
:type driver: str
|
||||
:param topic: the topic which to send messages on
|
||||
:type topic: str
|
||||
:param serializer: an optional entity serializer
|
||||
:type serializer: Serializer
|
||||
:param retry: a connection retries configuration
|
||||
None or -1 means to retry forever
|
||||
0 means no retry
|
||||
N means N retries
|
||||
:type retry: int
|
||||
"""
|
||||
transport.conf.register_opts(_notifier_opts)
|
||||
|
||||
self.transport = transport
|
||||
self.publisher_id = publisher_id
|
||||
self.retry = retry
|
||||
|
||||
self._driver_names = ([driver] if driver is not None
|
||||
else transport.conf.notification_driver)
|
||||
|
||||
self._topics = ([topic] if topic is not None
|
||||
else transport.conf.notification_topics)
|
||||
self._serializer = serializer or msg_serializer.NoOpSerializer()
|
||||
|
||||
self._driver_mgr = named.NamedExtensionManager(
|
||||
'oslo.messaging.notify.drivers',
|
||||
names=self._driver_names,
|
||||
invoke_on_load=True,
|
||||
invoke_args=[transport.conf],
|
||||
invoke_kwds={
|
||||
'topics': self._topics,
|
||||
'transport': self.transport,
|
||||
}
|
||||
)
|
||||
|
||||
_marker = object()
|
||||
|
||||
def prepare(self, publisher_id=_marker, retry=_marker):
|
||||
"""Return a specialized Notifier instance.
|
||||
|
||||
Returns a new Notifier instance with the supplied publisher_id. Allows
|
||||
sending notifications from multiple publisher_ids without the overhead
|
||||
of notification driver loading.
|
||||
|
||||
:param publisher_id: field in notifications sent, for example
|
||||
'compute.host1'
|
||||
:type publisher_id: str
|
||||
:param retry: a connection retries configuration
|
||||
None or -1 means to retry forever
|
||||
0 means no retry
|
||||
N means N retries
|
||||
:type retry: int
|
||||
"""
|
||||
return _SubNotifier._prepare(self, publisher_id, retry=retry)
|
||||
|
||||
def _notify(self, ctxt, event_type, payload, priority, publisher_id=None,
|
||||
retry=None):
|
||||
payload = self._serializer.serialize_entity(ctxt, payload)
|
||||
ctxt = self._serializer.serialize_context(ctxt)
|
||||
|
||||
msg = dict(message_id=six.text_type(uuid.uuid4()),
|
||||
publisher_id=publisher_id or self.publisher_id,
|
||||
event_type=event_type,
|
||||
priority=priority,
|
||||
payload=payload,
|
||||
timestamp=six.text_type(timeutils.utcnow()))
|
||||
|
||||
def do_notify(ext):
|
||||
try:
|
||||
ext.obj.notify(ctxt, msg, priority, retry or self.retry)
|
||||
except Exception as e:
|
||||
_LOG.exception("Problem '%(e)s' attempting to send to "
|
||||
"notification system. Payload=%(payload)s",
|
||||
dict(e=e, payload=payload))
|
||||
|
||||
if self._driver_mgr.extensions:
|
||||
self._driver_mgr.map(do_notify)
|
||||
|
||||
def audit(self, ctxt, event_type, payload):
|
||||
"""Send a notification at audit level.
|
||||
|
||||
:param ctxt: a request context dict
|
||||
:type ctxt: dict
|
||||
:param event_type: describes the event, for example
|
||||
'compute.create_instance'
|
||||
:type event_type: str
|
||||
:param payload: the notification payload
|
||||
:type payload: dict
|
||||
:raises: MessageDeliveryFailure
|
||||
"""
|
||||
self._notify(ctxt, event_type, payload, 'AUDIT')
|
||||
|
||||
def debug(self, ctxt, event_type, payload):
|
||||
"""Send a notification at debug level.
|
||||
|
||||
:param ctxt: a request context dict
|
||||
:type ctxt: dict
|
||||
:param event_type: describes the event, for example
|
||||
'compute.create_instance'
|
||||
:type event_type: str
|
||||
:param payload: the notification payload
|
||||
:type payload: dict
|
||||
:raises: MessageDeliveryFailure
|
||||
"""
|
||||
self._notify(ctxt, event_type, payload, 'DEBUG')
|
||||
|
||||
def info(self, ctxt, event_type, payload):
|
||||
"""Send a notification at info level.
|
||||
|
||||
:param ctxt: a request context dict
|
||||
:type ctxt: dict
|
||||
:param event_type: describes the event, for example
|
||||
'compute.create_instance'
|
||||
:type event_type: str
|
||||
:param payload: the notification payload
|
||||
:type payload: dict
|
||||
:raises: MessageDeliveryFailure
|
||||
"""
|
||||
self._notify(ctxt, event_type, payload, 'INFO')
|
||||
|
||||
def warn(self, ctxt, event_type, payload):
|
||||
"""Send a notification at warning level.
|
||||
|
||||
:param ctxt: a request context dict
|
||||
:type ctxt: dict
|
||||
:param event_type: describes the event, for example
|
||||
'compute.create_instance'
|
||||
:type event_type: str
|
||||
:param payload: the notification payload
|
||||
:type payload: dict
|
||||
:raises: MessageDeliveryFailure
|
||||
"""
|
||||
self._notify(ctxt, event_type, payload, 'WARN')
|
||||
|
||||
warning = warn
|
||||
|
||||
def error(self, ctxt, event_type, payload):
|
||||
"""Send a notification at error level.
|
||||
|
||||
:param ctxt: a request context dict
|
||||
:type ctxt: dict
|
||||
:param event_type: describes the event, for example
|
||||
'compute.create_instance'
|
||||
:type event_type: str
|
||||
:param payload: the notification payload
|
||||
:type payload: dict
|
||||
:raises: MessageDeliveryFailure
|
||||
"""
|
||||
self._notify(ctxt, event_type, payload, 'ERROR')
|
||||
|
||||
def critical(self, ctxt, event_type, payload):
|
||||
"""Send a notification at critical level.
|
||||
|
||||
:param ctxt: a request context dict
|
||||
:type ctxt: dict
|
||||
:param event_type: describes the event, for example
|
||||
'compute.create_instance'
|
||||
:type event_type: str
|
||||
:param payload: the notification payload
|
||||
:type payload: dict
|
||||
:raises: MessageDeliveryFailure
|
||||
"""
|
||||
self._notify(ctxt, event_type, payload, 'CRITICAL')
|
||||
|
||||
def sample(self, ctxt, event_type, payload):
|
||||
"""Send a notification at sample level.
|
||||
|
||||
Sample notifications are for high-frequency events
|
||||
that typically contain small payloads. eg: "CPU = 70%"
|
||||
|
||||
Not all drivers support the sample level
|
||||
(log, for example) so these could be dropped.
|
||||
|
||||
:param ctxt: a request context dict
|
||||
:type ctxt: dict
|
||||
:param event_type: describes the event, for example
|
||||
'compute.create_instance'
|
||||
:type event_type: str
|
||||
:param payload: the notification payload
|
||||
:type payload: dict
|
||||
:raises: MessageDeliveryFailure
|
||||
"""
|
||||
self._notify(ctxt, event_type, payload, 'SAMPLE')
|
||||
|
||||
|
||||
class _SubNotifier(Notifier):
|
||||
|
||||
_marker = Notifier._marker
|
||||
|
||||
def __init__(self, base, publisher_id, retry):
|
||||
self._base = base
|
||||
self.transport = base.transport
|
||||
self.publisher_id = publisher_id
|
||||
self.retry = retry
|
||||
|
||||
self._serializer = self._base._serializer
|
||||
self._driver_mgr = self._base._driver_mgr
|
||||
|
||||
def _notify(self, ctxt, event_type, payload, priority):
|
||||
super(_SubNotifier, self)._notify(ctxt, event_type, payload, priority)
|
||||
|
||||
@classmethod
|
||||
def _prepare(cls, base, publisher_id=_marker, retry=_marker):
|
||||
if publisher_id is cls._marker:
|
||||
publisher_id = base.publisher_id
|
||||
if retry is cls._marker:
|
||||
retry = base.retry
|
||||
return cls(base, publisher_id, retry=retry)
|
@ -20,18 +20,18 @@ __all__ = [
import copy
import itertools

from oslo.messaging._drivers import amqp
from oslo.messaging._drivers import impl_qpid
from oslo.messaging._drivers import impl_rabbit
from oslo.messaging._drivers import impl_zmq
from oslo.messaging._drivers import matchmaker
from oslo.messaging._drivers import matchmaker_redis
from oslo.messaging._drivers import matchmaker_ring
from oslo.messaging._drivers.protocols.amqp import opts as amqp_opts
from oslo.messaging._executors import base
from oslo.messaging.notify import notifier
from oslo.messaging.rpc import client
from oslo.messaging import transport
from oslo_messaging._drivers import amqp
from oslo_messaging._drivers import impl_qpid
from oslo_messaging._drivers import impl_rabbit
from oslo_messaging._drivers import impl_zmq
from oslo_messaging._drivers import matchmaker
from oslo_messaging._drivers import matchmaker_redis
from oslo_messaging._drivers import matchmaker_ring
from oslo_messaging._drivers.protocols.amqp import opts as amqp_opts
from oslo_messaging._executors import base
from oslo_messaging.notify import notifier
from oslo_messaging.rpc import client
from oslo_messaging import transport

_global_opt_lists = [
    amqp.amqp_opts,
@ -64,7 +64,7 @@ def list_opts():
    registered. A group name of None corresponds to the [DEFAULT] group in
    config files.

    This function is also discoverable via the 'oslo.messaging' entry point
    This function is also discoverable via the 'oslo_messaging' entry point
    under the 'oslo.config.opts' namespace.

    The purpose of this is to allow tools like the Oslo sample config file
32 oslo_messaging/rpc/__init__.py Normal file
@ -0,0 +1,32 @@
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

__all__ = [
    'ClientSendError',
    'ExpectedException',
    'NoSuchMethod',
    'RPCClient',
    'RPCDispatcher',
    'RPCDispatcherError',
    'RPCVersionCapError',
    'RemoteError',
    'UnsupportedVersion',
    'expected_exceptions',
    'get_rpc_server',
]

from .client import *
from .dispatcher import *
from .server import *
397 oslo_messaging/rpc/client.py Normal file
@ -0,0 +1,397 @@
|
||||
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
# Copyright 2013 Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
__all__ = [
|
||||
'ClientSendError',
|
||||
'RPCClient',
|
||||
'RPCVersionCapError',
|
||||
'RemoteError',
|
||||
]
|
||||
|
||||
import six
|
||||
|
||||
from oslo.config import cfg
|
||||
from oslo_messaging._drivers import base as driver_base
|
||||
from oslo_messaging import _utils as utils
|
||||
from oslo_messaging import exceptions
|
||||
from oslo_messaging import serializer as msg_serializer
|
||||
|
||||
_client_opts = [
|
||||
cfg.IntOpt('rpc_response_timeout',
|
||||
default=60,
|
||||
help='Seconds to wait for a response from a call.'),
|
||||
]
|
||||
|
||||
|
||||
class RemoteError(exceptions.MessagingException):
|
||||
|
||||
"""Signifies that a remote endpoint method has raised an exception.
|
||||
|
||||
Contains a string representation of the type of the original exception,
|
||||
the value of the original exception, and the traceback. These are
|
||||
sent to the parent as a joined string so printing the exception
|
||||
contains all of the relevant info.
|
||||
"""
|
||||
|
||||
def __init__(self, exc_type=None, value=None, traceback=None):
|
||||
self.exc_type = exc_type
|
||||
self.value = value
|
||||
self.traceback = traceback
|
||||
msg = ("Remote error: %(exc_type)s %(value)s\n%(traceback)s." %
|
||||
dict(exc_type=self.exc_type, value=self.value,
|
||||
traceback=self.traceback))
|
||||
super(RemoteError, self).__init__(msg)
|
||||
|
||||
|
||||
class RPCVersionCapError(exceptions.MessagingException):
|
||||
|
||||
def __init__(self, version, version_cap):
|
||||
self.version = version
|
||||
self.version_cap = version_cap
|
||||
msg = ("Requested message version, %(version)s is too high. It needs "
|
||||
"to be lower than the specified version cap %(version_cap)s." %
|
||||
dict(version=self.version, version_cap=self.version_cap))
|
||||
super(RPCVersionCapError, self).__init__(msg)
|
||||
|
||||
|
||||
class ClientSendError(exceptions.MessagingException):
|
||||
"""Raised if we failed to send a message to a target."""
|
||||
|
||||
def __init__(self, target, ex):
|
||||
msg = 'Failed to send to target "%s": %s' % (target, ex)
|
||||
super(ClientSendError, self).__init__(msg)
|
||||
self.target = target
|
||||
self.ex = ex
|
||||
|
||||
|
||||
class _CallContext(object):
|
||||
|
||||
_marker = object()
|
||||
|
||||
def __init__(self, transport, target, serializer,
|
||||
timeout=None, version_cap=None, retry=None):
|
||||
self.conf = transport.conf
|
||||
|
||||
self.transport = transport
|
||||
self.target = target
|
||||
self.serializer = serializer
|
||||
self.timeout = timeout
|
||||
self.retry = retry
|
||||
self.version_cap = version_cap
|
||||
|
||||
super(_CallContext, self).__init__()
|
||||
|
||||
def _make_message(self, ctxt, method, args):
|
||||
msg = dict(method=method)
|
||||
|
||||
msg['args'] = dict()
|
||||
for argname, arg in six.iteritems(args):
|
||||
msg['args'][argname] = self.serializer.serialize_entity(ctxt, arg)
|
||||
|
||||
if self.target.namespace is not None:
|
||||
msg['namespace'] = self.target.namespace
|
||||
if self.target.version is not None:
|
||||
msg['version'] = self.target.version
|
||||
|
||||
return msg
|
||||
|
||||
def _check_version_cap(self, version):
|
||||
if not utils.version_is_compatible(self.version_cap, version):
|
||||
raise RPCVersionCapError(version=version,
|
||||
version_cap=self.version_cap)
|
||||
|
||||
def can_send_version(self, version=_marker):
|
||||
"""Check to see if a version is compatible with the version cap."""
|
||||
version = self.target.version if version is self._marker else version
|
||||
return (not self.version_cap or
|
||||
utils.version_is_compatible(self.version_cap,
|
||||
version))
|
||||
|
||||
def cast(self, ctxt, method, **kwargs):
|
||||
"""Invoke a method and return immediately. See RPCClient.cast()."""
|
||||
msg = self._make_message(ctxt, method, kwargs)
|
||||
ctxt = self.serializer.serialize_context(ctxt)
|
||||
|
||||
if self.version_cap:
|
||||
self._check_version_cap(msg.get('version'))
|
||||
try:
|
||||
self.transport._send(self.target, ctxt, msg, retry=self.retry)
|
||||
except driver_base.TransportDriverError as ex:
|
||||
raise ClientSendError(self.target, ex)
|
||||
|
||||
def call(self, ctxt, method, **kwargs):
|
||||
"""Invoke a method and wait for a reply. See RPCClient.call()."""
|
||||
if self.target.fanout:
|
||||
raise exceptions.InvalidTarget('A call cannot be used with fanout',
|
||||
self.target)
|
||||
|
||||
msg = self._make_message(ctxt, method, kwargs)
|
||||
msg_ctxt = self.serializer.serialize_context(ctxt)
|
||||
|
||||
timeout = self.timeout
|
||||
if self.timeout is None:
|
||||
timeout = self.conf.rpc_response_timeout
|
||||
|
||||
if self.version_cap:
|
||||
self._check_version_cap(msg.get('version'))
|
||||
|
||||
try:
|
||||
result = self.transport._send(self.target, msg_ctxt, msg,
|
||||
wait_for_reply=True, timeout=timeout,
|
||||
retry=self.retry)
|
||||
except driver_base.TransportDriverError as ex:
|
||||
raise ClientSendError(self.target, ex)
|
||||
return self.serializer.deserialize_entity(ctxt, result)
|
||||
|
||||
@classmethod
|
||||
def _prepare(cls, base,
|
||||
exchange=_marker, topic=_marker, namespace=_marker,
|
||||
version=_marker, server=_marker, fanout=_marker,
|
||||
timeout=_marker, version_cap=_marker, retry=_marker):
|
||||
"""Prepare a method invocation context. See RPCClient.prepare()."""
|
||||
kwargs = dict(
|
||||
exchange=exchange,
|
||||
topic=topic,
|
||||
namespace=namespace,
|
||||
version=version,
|
||||
server=server,
|
||||
fanout=fanout)
|
||||
kwargs = dict([(k, v) for k, v in kwargs.items()
|
||||
if v is not cls._marker])
|
||||
target = base.target(**kwargs)
|
||||
|
||||
if timeout is cls._marker:
|
||||
timeout = base.timeout
|
||||
if retry is cls._marker:
|
||||
retry = base.retry
|
||||
if version_cap is cls._marker:
|
||||
version_cap = base.version_cap
|
||||
|
||||
return _CallContext(base.transport, target,
|
||||
base.serializer,
|
||||
timeout, version_cap, retry)
|
||||
|
||||
def prepare(self, exchange=_marker, topic=_marker, namespace=_marker,
|
||||
version=_marker, server=_marker, fanout=_marker,
|
||||
timeout=_marker, version_cap=_marker, retry=_marker):
|
||||
"""Prepare a method invocation context. See RPCClient.prepare()."""
|
||||
return self._prepare(self,
|
||||
exchange, topic, namespace,
|
||||
version, server, fanout,
|
||||
timeout, version_cap, retry)
|
||||
|
||||
|
||||
class RPCClient(object):
|
||||
|
||||
"""A class for invoking methods on remote servers.
|
||||
|
||||
The RPCClient class is responsible for sending method invocations to remote
|
||||
servers via a messaging transport.
|
||||
|
||||
A default target is supplied to the RPCClient constructor, but target
|
||||
attributes can be overridden for individual method invocations using the
|
||||
prepare() method.
|
||||
|
||||
A method invocation consists of a request context dictionary, a method name
|
||||
and a dictionary of arguments. A cast() invocation just sends the request
|
||||
and returns immediately. A call() invocation waits for the server to send
|
||||
a return value.
|
||||
|
||||
This class is intended to be used by wrapping it in another class which
|
||||
provides methods on the subclass to perform the remote invocation using
|
||||
call() or cast()::
|
||||
|
||||
class TestClient(object):
|
||||
|
||||
def __init__(self, transport):
|
||||
target = messaging.Target(topic='testtopic', version='2.0')
|
||||
self._client = messaging.RPCClient(transport, target)
|
||||
|
||||
def test(self, ctxt, arg):
|
||||
return self._client.call(ctxt, 'test', arg=arg)
|
||||
|
||||
An example of using the prepare() method to override some attributes of the
|
||||
default target::
|
||||
|
||||
def test(self, ctxt, arg):
|
||||
cctxt = self._client.prepare(version='2.5')
|
||||
return cctxt.call(ctxt, 'test', arg=arg)
|
||||
|
||||
RPCClient has a number of other properties - for example, timeout and
|
||||
version_cap - which may make sense to override for some method invocations,
|
||||
so they too can be passed to prepare()::
|
||||
|
||||
def test(self, ctxt, arg):
|
||||
cctxt = self._client.prepare(timeout=10)
|
||||
return cctxt.call(ctxt, 'test', arg=arg)
|
||||
|
||||
However, this class can be used directly without wrapping it in another class.
|
||||
For example::
|
||||
|
||||
transport = messaging.get_transport(cfg.CONF)
|
||||
target = messaging.Target(topic='testtopic', version='2.0')
|
||||
client = messaging.RPCClient(transport, target)
|
||||
client.call(ctxt, 'test', arg=arg)
|
||||
|
||||
but this is probably only useful in limited circumstances as a wrapper
|
||||
class will usually help to make the code much more obvious.
|
||||
|
||||
By default, cast() and call() will block until the message is successfully
|
||||
sent. However, the retry parameter can be used to have message sending
|
||||
fail with a MessageDeliveryFailure after the given number of retries. For
|
||||
example::
|
||||
|
||||
client = messaging.RPCClient(transport, target, retry=None)
|
||||
client.call(ctxt, 'sync')
|
||||
try:
|
||||
client.prepare(retry=0).cast(ctxt, 'ping')
|
||||
except messaging.MessageDeliveryFailure:
|
||||
LOG.error("Failed to send ping message")
|
||||
"""
|
||||
|
||||
def __init__(self, transport, target,
|
||||
timeout=None, version_cap=None, serializer=None, retry=None):
|
||||
"""Construct an RPC client.
|
||||
|
||||
:param transport: a messaging transport handle
|
||||
:type transport: Transport
|
||||
:param target: the default target for invocations
|
||||
:type target: Target
|
||||
:param timeout: an optional default timeout (in seconds) for call()s
|
||||
:type timeout: int or float
|
||||
:param version_cap: raise a RPCVersionCapError if the requested version exceeds this cap
|
||||
:type version_cap: str
|
||||
:param serializer: an optional entity serializer
|
||||
:type serializer: Serializer
|
||||
:param retry: an optional default connection retries configuration
|
||||
None or -1 means to retry forever
|
||||
0 means no retry
|
||||
N means N retries
|
||||
:type retry: int
|
||||
"""
|
||||
self.conf = transport.conf
|
||||
self.conf.register_opts(_client_opts)
|
||||
|
||||
self.transport = transport
|
||||
self.target = target
|
||||
self.timeout = timeout
|
||||
self.retry = retry
|
||||
self.version_cap = version_cap
|
||||
self.serializer = serializer or msg_serializer.NoOpSerializer()
|
||||
|
||||
super(RPCClient, self).__init__()
|
||||
|
||||
_marker = _CallContext._marker
|
||||
|
||||
def prepare(self, exchange=_marker, topic=_marker, namespace=_marker,
|
||||
version=_marker, server=_marker, fanout=_marker,
|
||||
timeout=_marker, version_cap=_marker, retry=_marker):
|
||||
"""Prepare a method invocation context.
|
||||
|
||||
Use this method to override client properties for an individual method
|
||||
invocation. For example::
|
||||
|
||||
def test(self, ctxt, arg):
|
||||
cctxt = self.prepare(version='2.5')
|
||||
return cctxt.call(ctxt, 'test', arg=arg)
|
||||
|
||||
:param exchange: see Target.exchange
|
||||
:type exchange: str
|
||||
:param topic: see Target.topic
|
||||
:type topic: str
|
||||
:param namespace: see Target.namespace
|
||||
:type namespace: str
|
||||
:param version: requirement the server must support, see Target.version
|
||||
:type version: str
|
||||
:param server: send to a specific server, see Target.server
|
||||
:type server: str
|
||||
:param fanout: send to all servers on topic, see Target.fanout
|
||||
:type fanout: bool
|
||||
:param timeout: an optional default timeout (in seconds) for call()s
|
||||
:type timeout: int or float
|
||||
:param version_cap: raise a RPCVersionCapError if the requested version exceeds this cap
|
||||
:type version_cap: str
|
||||
:param retry: an optional connection retries configuration
|
||||
None or -1 means to retry forever
|
||||
0 means no retry
|
||||
N means N retries
|
||||
:type retry: int
|
||||
"""
|
||||
return _CallContext._prepare(self,
|
||||
exchange, topic, namespace,
|
||||
version, server, fanout,
|
||||
timeout, version_cap, retry)
|
||||
|
||||
def cast(self, ctxt, method, **kwargs):
|
||||
"""Invoke a method and return immediately.
|
||||
|
||||
Method arguments must either be primitive types or types supported by
|
||||
the client's serializer (if any).
|
||||
|
||||
Similarly, the request context must be a dict unless the client's
|
||||
serializer supports serializing another type.
|
||||
|
||||
:param ctxt: a request context dict
|
||||
:type ctxt: dict
|
||||
:param method: the method name
|
||||
:type method: str
|
||||
:param kwargs: a dict of method arguments
|
||||
:type kwargs: dict
|
||||
:raises: MessageDeliveryFailure
|
||||
"""
|
||||
self.prepare().cast(ctxt, method, **kwargs)
|
||||
|
||||
def call(self, ctxt, method, **kwargs):
|
||||
"""Invoke a method and wait for a reply.
|
||||
|
||||
Method arguments must either be primitive types or types supported by
|
||||
the client's serializer (if any). Similarly, the request context must
|
||||
be a dict unless the client's serializer supports serializing another
|
||||
type.
|
||||
|
||||
The semantics of how any errors raised by the remote RPC endpoint
|
||||
method are handled are quite subtle.
|
||||
|
||||
Firstly, if the remote exception is contained in one of the modules
|
||||
listed in the allow_remote_exmods messaging.get_transport() parameter,
|
||||
then this exception will be re-raised by call(). However, such
|
||||
locally re-raised remote exceptions are distinguishable from the same
|
||||
exception type raised locally because re-raised remote exceptions are
|
||||
modified such that their class name ends with the '_Remote' suffix so
|
||||
you may do::
|
||||
|
||||
if ex.__class__.__name__.endswith('_Remote'):
|
||||
# Some special case for locally re-raised remote exceptions
|
||||
|
||||
Secondly, if a remote exception is not from a module listed in the
|
||||
allowed_remote_exmods list, then a messaging.RemoteError exception is
|
||||
raised with all details of the remote exception.
|
||||
|
||||
:param ctxt: a request context dict
|
||||
:type ctxt: dict
|
||||
:param method: the method name
|
||||
:type method: str
|
||||
:param kwargs: a dict of method arguments
|
||||
:type kwargs: dict
|
||||
:raises: MessagingTimeout, RemoteError, MessageDeliveryFailure
|
||||
"""
|
||||
return self.prepare().call(ctxt, method, **kwargs)
|
||||
|
||||
def can_send_version(self, version=_marker):
|
||||
"""Check to see if a version is compatible with the version cap."""
|
||||
return self.prepare(version=version).can_send_version()
|
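A hedged sketch, not part of this change, combining version negotiation and bounded retries with the client above; the topic, version numbers and method names are illustrative::

    from oslo.config import cfg
    import oslo_messaging

    transport = oslo_messaging.get_transport(cfg.CONF)
    target = oslo_messaging.Target(topic='testtopic', version='2.0')
    client = oslo_messaging.RPCClient(transport, target, version_cap='2.4')

    # Ask for a newer method only if the cap allows it; give up after 2 retries.
    cctxt = client.prepare(version='2.5', retry=2)
    if cctxt.can_send_version('2.5'):
        cctxt.cast({}, 'test', arg=1)
    else:
        client.cast({}, 'test_old', arg=1)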
195 oslo_messaging/rpc/dispatcher.py Normal file
@ -0,0 +1,195 @@
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
# Copyright 2013 Red Hat, Inc.
|
||||
# Copyright 2013 New Dream Network, LLC (DreamHost)
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
__all__ = [
|
||||
'NoSuchMethod',
|
||||
'RPCDispatcher',
|
||||
'RPCDispatcherError',
|
||||
'UnsupportedVersion',
|
||||
'ExpectedException',
|
||||
]
|
||||
|
||||
import contextlib
|
||||
import logging
|
||||
import sys
|
||||
|
||||
import six
|
||||
|
||||
from oslo_messaging._i18n import _
|
||||
from oslo_messaging import _utils as utils
|
||||
from oslo_messaging import localcontext
|
||||
from oslo_messaging import serializer as msg_serializer
|
||||
from oslo_messaging import server as msg_server
|
||||
from oslo_messaging import target as msg_target
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ExpectedException(Exception):
|
||||
"""Encapsulates an expected exception raised by an RPC endpoint
|
||||
|
||||
Merely instantiating this exception records the current exception
|
||||
information, which will be passed back to the RPC client without
|
||||
exceptional logging.
|
||||
"""
|
||||
def __init__(self):
|
||||
self.exc_info = sys.exc_info()
|
||||
|
||||
|
||||
class RPCDispatcherError(msg_server.MessagingServerError):
|
||||
"A base class for all RPC dispatcher exceptions."
|
||||
|
||||
|
||||
class NoSuchMethod(RPCDispatcherError, AttributeError):
|
||||
"Raised if there is no endpoint which exposes the requested method."
|
||||
|
||||
def __init__(self, method):
|
||||
msg = "Endpoint does not support RPC method %s" % method
|
||||
super(NoSuchMethod, self).__init__(msg)
|
||||
self.method = method
|
||||
|
||||
|
||||
class UnsupportedVersion(RPCDispatcherError):
|
||||
"Raised if there is no endpoint which supports the requested version."
|
||||
|
||||
def __init__(self, version, method=None):
|
||||
msg = "Endpoint does not support RPC version %s" % version
|
||||
if method:
|
||||
msg = "%s. Attempted method: %s" % (msg, method)
|
||||
super(UnsupportedVersion, self).__init__(msg)
|
||||
self.version = version
|
||||
self.method = method
|
||||
|
||||
|
||||
class RPCDispatcher(object):
|
||||
"""A message dispatcher which understands RPC messages.
|
||||
|
||||
A MessageHandlingServer is constructed by passing a callable dispatcher
|
||||
which is invoked with context and message dictionaries each time a message
|
||||
is received.
|
||||
|
||||
RPCDispatcher is one such dispatcher which understands the format of RPC
|
||||
messages. The dispatcher looks at the namespace, version and method values
|
||||
in the message and matches those against a list of available endpoints.
|
||||
|
||||
Endpoints may have a target attribute describing the namespace and version
|
||||
of the methods exposed by that object. All public methods on an endpoint
|
||||
object are remotely invokable by clients.
|
||||
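For example, an endpoint exposing a versioned, namespaced interface might
look like the following (an illustrative sketch; the class and method names
are placeholders)::

    class ComputeEndpoint(object):

        target = oslo_messaging.Target(namespace='compute', version='2.1')

        def resize(self, ctxt, instance_id, flavor_id):
            ...

A message with namespace 'compute', version '2.0' and method 'resize' would
be dispatched to this endpoint; a message with version '3.0' would not,
since major versions are incompatible.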
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, target, endpoints, serializer):
|
||||
"""Construct a rpc server dispatcher.
|
||||
|
||||
:param target: the exchange, topic and server to listen on
|
||||
:type target: Target
|
||||
"""
|
||||
|
||||
self.endpoints = endpoints
|
||||
self.serializer = serializer or msg_serializer.NoOpSerializer()
|
||||
self._default_target = msg_target.Target()
|
||||
self._target = target
|
||||
|
||||
def _listen(self, transport):
|
||||
return transport._listen(self._target)
|
||||
|
||||
@staticmethod
|
||||
def _is_namespace(target, namespace):
|
||||
return namespace == target.namespace
|
||||
|
||||
@staticmethod
|
||||
def _is_compatible(target, version):
|
||||
endpoint_version = target.version or '1.0'
|
||||
return utils.version_is_compatible(endpoint_version, version)
|
||||
|
||||
def _do_dispatch(self, endpoint, method, ctxt, args, executor_callback):
|
||||
ctxt = self.serializer.deserialize_context(ctxt)
|
||||
new_args = dict()
|
||||
for argname, arg in six.iteritems(args):
|
||||
new_args[argname] = self.serializer.deserialize_entity(ctxt, arg)
|
||||
func = getattr(endpoint, method)
|
||||
if executor_callback:
|
||||
result = executor_callback(func, ctxt, **new_args)
|
||||
else:
|
||||
result = func(ctxt, **new_args)
|
||||
return self.serializer.serialize_entity(ctxt, result)
|
||||
|
||||
@contextlib.contextmanager
|
||||
def __call__(self, incoming, executor_callback=None):
|
||||
incoming.acknowledge()
|
||||
yield lambda: self._dispatch_and_reply(incoming, executor_callback)
|
||||
|
||||
def _dispatch_and_reply(self, incoming, executor_callback):
|
||||
try:
|
||||
incoming.reply(self._dispatch(incoming.ctxt,
|
||||
incoming.message,
|
||||
executor_callback))
|
||||
except ExpectedException as e:
|
||||
LOG.debug(u'Expected exception during message handling (%s)',
|
||||
e.exc_info[1])
|
||||
incoming.reply(failure=e.exc_info, log_failure=False)
|
||||
except Exception as e:
|
||||
# sys.exc_info() is deleted by LOG.exception().
|
||||
exc_info = sys.exc_info()
|
||||
LOG.error(_('Exception during message handling: %s'), e,
|
||||
exc_info=exc_info)
|
||||
incoming.reply(failure=exc_info)
|
||||
# NOTE(dhellmann): Remove circular object reference
|
||||
# between the current stack frame and the traceback in
|
||||
# exc_info.
|
||||
del exc_info
|
||||
|
||||
def _dispatch(self, ctxt, message, executor_callback=None):
|
||||
"""Dispatch an RPC message to the appropriate endpoint method.
|
||||
|
||||
:param ctxt: the request context
|
||||
:type ctxt: dict
|
||||
:param message: the message payload
|
||||
:type message: dict
|
||||
:raises: NoSuchMethod, UnsupportedVersion
|
||||
"""
|
||||
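# An incoming RPC message is expected to be a dict roughly of the form
# (values here are illustrative only):
#   {'method': 'resize', 'args': {'instance_id': 'abc'},
#    'namespace': 'compute', 'version': '2.0'}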
method = message.get('method')
|
||||
args = message.get('args', {})
|
||||
namespace = message.get('namespace')
|
||||
version = message.get('version', '1.0')
|
||||
|
||||
found_compatible = False
|
||||
for endpoint in self.endpoints:
|
||||
target = getattr(endpoint, 'target', None)
|
||||
if not target:
|
||||
target = self._default_target
|
||||
|
||||
if not (self._is_namespace(target, namespace) and
|
||||
self._is_compatible(target, version)):
|
||||
continue
|
||||
|
||||
if hasattr(endpoint, method):
|
||||
localcontext.set_local_context(ctxt)
|
||||
try:
|
||||
return self._do_dispatch(endpoint, method, ctxt, args,
|
||||
executor_callback)
|
||||
finally:
|
||||
localcontext.clear_local_context()
|
||||
|
||||
found_compatible = True
|
||||
|
||||
if found_compatible:
|
||||
raise NoSuchMethod(method)
|
||||
else:
|
||||
raise UnsupportedVersion(version, method=method)
|
152
oslo_messaging/rpc/server.py
Normal file
@ -0,0 +1,152 @@
|
||||
|
||||
# Copyright 2013 Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
An RPC server exposes a number of endpoints, each of which contain a set of
|
||||
methods which may be invoked remotely by clients over a given transport.
|
||||
|
||||
To create an RPC server, you supply a transport, target and a list of
|
||||
endpoints.
|
||||
|
||||
A transport can be obtained simply by calling the get_transport() method::
|
||||
|
||||
transport = messaging.get_transport(conf)
|
||||
|
||||
which will load the appropriate transport driver according to the user's
|
||||
messaging configuration. See get_transport() for more details.
|
||||
|
||||
The target supplied when creating an RPC server expresses the topic, server
|
||||
name and - optionally - the exchange to listen on. See Target for more details
|
||||
on these attributes.
|
||||
|
||||
Each endpoint object may have a target attribute which may have namespace and
|
||||
version fields set. By default, we use the 'null namespace' and version 1.0.
|
||||
Incoming method calls will be dispatched to the first endpoint with the
|
||||
requested method, a matching namespace and a compatible version number.
|
||||
|
||||
RPC servers have start(), stop() and wait() methods to begin handling
|
||||
requests, stop handling requests and wait for all in-process requests to
|
||||
complete.
|
||||
|
||||
A simple example of an RPC server with multiple endpoints might be::
|
||||
|
||||
from oslo.config import cfg
|
||||
import oslo_messaging
|
||||
|
||||
class ServerControlEndpoint(object):
|
||||
|
||||
target = oslo_messaging.Target(namespace='control',
|
||||
version='2.0')
|
||||
|
||||
def __init__(self, server):
|
||||
self.server = server
|
||||
|
||||
def stop(self, ctx):
|
||||
if self.server:
|
||||
self.server.stop()
|
||||
|
||||
class TestEndpoint(object):
|
||||
|
||||
def test(self, ctx, arg):
|
||||
return arg
|
||||
|
||||
transport = oslo_messaging.get_transport(cfg.CONF)
|
||||
target = oslo_messaging.Target(topic='test', server='server1')
|
||||
endpoints = [
|
||||
ServerControlEndpoint(None),
|
||||
TestEndpoint(),
|
||||
]
|
||||
server = oslo_messaging.get_rpc_server(transport, target, endpoints,
|
||||
executor='blocking')
|
||||
server.start()
|
||||
server.wait()
|
||||
|
||||
Clients can invoke methods on the server by sending the request to a topic and
|
||||
it gets sent to one of the servers listening on the topic, or by sending the
|
||||
request to a specific server listening on the topic, or by sending the request
|
||||
to all servers listening on the topic (known as fanout). These modes are chosen
|
||||
via the server and fanout attributes on Target but the mode used is transparent
|
||||
to the server.
|
||||
|
||||
The first parameter to method invocations is always the request context
|
||||
supplied by the client.
|
||||
|
||||
Parameters to the method invocation are primitive types and so must be the
|
||||
return values from the methods. By supplying a serializer object, a server can
|
||||
deserialize a request context and arguments from - and serialize return values
|
||||
to - primitive types.
|
||||
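As a sketch (assuming JSON-friendly entities; this serializer is not part of
this module), a custom serializer might look like::

    from oslo.serialization import jsonutils

    class JsonPayloadSerializer(oslo_messaging.NoOpSerializer):

        def serialize_entity(self, ctxt, entity):
            return jsonutils.to_primitive(entity, convert_instances=True)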
"""
|
||||
|
||||
__all__ = [
|
||||
'get_rpc_server',
|
||||
'expected_exceptions',
|
||||
]
|
||||
|
||||
from oslo_messaging.rpc import dispatcher as rpc_dispatcher
|
||||
from oslo_messaging import server as msg_server
|
||||
|
||||
|
||||
def get_rpc_server(transport, target, endpoints,
|
||||
executor='blocking', serializer=None):
|
||||
"""Construct an RPC server.
|
||||
|
||||
The executor parameter controls how incoming messages will be received and
|
||||
dispatched. By default, the most simple executor is used - the blocking
|
||||
executor.
|
||||
|
||||
If the eventlet executor is used, the threading and time libraries need to be
|
||||
monkeypatched.
|
||||
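For example, an application selecting the eventlet executor would typically
do something like the following early at startup (a sketch, not part of the
original docstring)::

    import eventlet
    eventlet.monkey_patch()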
|
||||
:param transport: the messaging transport
|
||||
:type transport: Transport
|
||||
:param target: the exchange, topic and server to listen on
|
||||
:type target: Target
|
||||
:param endpoints: a list of endpoint objects
|
||||
:type endpoints: list
|
||||
:param executor: name of a message executor - for example
|
||||
'eventlet', 'blocking'
|
||||
:type executor: str
|
||||
:param serializer: an optional entity serializer
|
||||
:type serializer: Serializer
|
||||
"""
|
||||
dispatcher = rpc_dispatcher.RPCDispatcher(target, endpoints, serializer)
|
||||
return msg_server.MessageHandlingServer(transport, dispatcher, executor)
|
||||
|
||||
|
||||
def expected_exceptions(*exceptions):
|
||||
"""Decorator for RPC endpoint methods that raise expected exceptions.
|
||||
|
||||
Marking an endpoint method with this decorator allows the declaration
|
||||
of expected exceptions that the RPC server should not consider fatal,
|
||||
and not log as if they were generated in a real error scenario.
|
||||
|
||||
Note that this will cause listed exceptions to be wrapped in an
|
||||
ExpectedException, which is used internally by the RPC server. The RPC
|
||||
client will see the original exception type.
|
||||
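A hypothetical usage sketch (the endpoint, method and exception names are
placeholders)::

    class ComputeEndpoint(object):

        @expected_exceptions(InstanceNotFound)
        def resize(self, ctxt, instance_id, flavor_id):
            ...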
"""
|
||||
def outer(func):
|
||||
def inner(*args, **kwargs):
|
||||
try:
|
||||
return func(*args, **kwargs)
|
||||
# Take advantage of the fact that we can catch
|
||||
# multiple exception types using a tuple of
|
||||
# exception classes, with subclass detection
|
||||
# for free. Any exception that is not in or
|
||||
# derived from the args passed to us will be
|
||||
# ignored and thrown as normal.
|
||||
except exceptions:
|
||||
raise rpc_dispatcher.ExpectedException()
|
||||
return inner
|
||||
return outer
|
76
oslo_messaging/serializer.py
Normal file
@ -0,0 +1,76 @@
|
||||
# Copyright 2013 IBM Corp.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
__all__ = ['Serializer', 'NoOpSerializer']
|
||||
|
||||
"""Provides the definition of a message serialization handler"""
|
||||
|
||||
import abc
|
||||
|
||||
import six
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
|
||||
class Serializer(object):
|
||||
"""Generic (de-)serialization definition base class."""
|
||||
|
||||
@abc.abstractmethod
|
||||
def serialize_entity(self, ctxt, entity):
|
||||
"""Serialize something to primitive form.
|
||||
|
||||
:param ctxt: Request context, in deserialized form
|
||||
:param entity: Entity to be serialized
|
||||
:returns: Serialized form of entity
|
||||
"""
|
||||
|
||||
@abc.abstractmethod
|
||||
def deserialize_entity(self, ctxt, entity):
|
||||
"""Deserialize something from primitive form.
|
||||
|
||||
:param ctxt: Request context, in deserialized form
|
||||
:param entity: Primitive to be deserialized
|
||||
:returns: Deserialized form of entity
|
||||
"""
|
||||
|
||||
@abc.abstractmethod
|
||||
def serialize_context(self, ctxt):
|
||||
"""Serialize a request context into a dictionary.
|
||||
|
||||
:param ctxt: Request context
|
||||
:returns: Serialized form of context
|
||||
"""
|
||||
|
||||
@abc.abstractmethod
|
||||
def deserialize_context(self, ctxt):
|
||||
"""Deserialize a dictionary into a request context.
|
||||
|
||||
:param ctxt: Request context dictionary
|
||||
:returns: Deserialized form of the context
|
||||
"""
|
||||
|
||||
|
||||
class NoOpSerializer(Serializer):
|
||||
"""A serializer that does nothing."""
|
||||
|
||||
def serialize_entity(self, ctxt, entity):
|
||||
return entity
|
||||
|
||||
def deserialize_entity(self, ctxt, entity):
|
||||
return entity
|
||||
|
||||
def serialize_context(self, ctxt):
|
||||
return ctxt
|
||||
|
||||
def deserialize_context(self, ctxt):
|
||||
return ctxt
|
150
oslo_messaging/server.py
Normal file
@ -0,0 +1,150 @@
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
# Copyright 2013 Red Hat, Inc.
|
||||
# Copyright 2013 New Dream Network, LLC (DreamHost)
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
__all__ = [
|
||||
'ExecutorLoadFailure',
|
||||
'MessageHandlingServer',
|
||||
'MessagingServerError',
|
||||
'ServerListenError',
|
||||
]
|
||||
|
||||
from stevedore import driver
|
||||
|
||||
from oslo_messaging._drivers import base as driver_base
|
||||
from oslo_messaging import exceptions
|
||||
|
||||
|
||||
class MessagingServerError(exceptions.MessagingException):
|
||||
"""Base class for all MessageHandlingServer exceptions."""
|
||||
|
||||
|
||||
class ExecutorLoadFailure(MessagingServerError):
|
||||
"""Raised if an executor can't be loaded."""
|
||||
|
||||
def __init__(self, executor, ex):
|
||||
msg = 'Failed to load executor "%s": %s' % (executor, ex)
|
||||
super(ExecutorLoadFailure, self).__init__(msg)
|
||||
self.executor = executor
|
||||
self.ex = ex
|
||||
|
||||
|
||||
class ServerListenError(MessagingServerError):
|
||||
"""Raised if we failed to listen on a target."""
|
||||
|
||||
def __init__(self, target, ex):
|
||||
msg = 'Failed to listen on target "%s": %s' % (target, ex)
|
||||
super(ServerListenError, self).__init__(msg)
|
||||
self.target = target
|
||||
self.ex = ex
|
||||
|
||||
|
||||
class MessageHandlingServer(object):
|
||||
"""Server for handling messages.
|
||||
|
||||
Connect a transport to a dispatcher that knows how to process the
|
||||
message using an executor that knows how the app wants to create
|
||||
new tasks.
|
||||
"""
|
||||
|
||||
def __init__(self, transport, dispatcher, executor='blocking'):
|
||||
"""Construct a message handling server.
|
||||
|
||||
The dispatcher parameter is a callable which is invoked with context
|
||||
and message dictionaries each time a message is received.
|
||||
|
||||
The executor parameter controls how incoming messages will be received
|
||||
and dispatched. By default, the most simple executor is used - the
|
||||
blocking executor.
|
||||
|
||||
:param transport: the messaging transport
|
||||
:type transport: Transport
|
||||
:param dispatcher: a callable which is invoked for each method
|
||||
:type dispatcher: callable
|
||||
:param executor: name of message executor - for example
|
||||
'eventlet', 'blocking'
|
||||
:type executor: str
|
||||
"""
|
||||
self.conf = transport.conf
|
||||
|
||||
self.transport = transport
|
||||
self.dispatcher = dispatcher
|
||||
self.executor = executor
|
||||
|
||||
try:
|
||||
mgr = driver.DriverManager('oslo.messaging.executors',
|
||||
self.executor)
|
||||
except RuntimeError as ex:
|
||||
raise ExecutorLoadFailure(self.executor, ex)
|
||||
else:
|
||||
self._executor_cls = mgr.driver
|
||||
self._executor = None
|
||||
|
||||
super(MessageHandlingServer, self).__init__()
|
||||
|
||||
def start(self):
|
||||
"""Start handling incoming messages.
|
||||
|
||||
This method causes the server to begin polling the transport for
|
||||
incoming messages and passing them to the dispatcher. Message
|
||||
processing will continue until the stop() method is called.
|
||||
|
||||
The executor controls how the server integrates with the applications
|
||||
I/O handling strategy - it may choose to poll for messages in a new
|
||||
process, thread or co-operatively scheduled coroutine or simply by
|
||||
registering a callback with an event loop. Similarly, the executor may
|
||||
choose to dispatch messages in a new thread, coroutine or simply the
|
||||
current thread.
|
||||
"""
|
||||
if self._executor is not None:
|
||||
return
|
||||
try:
|
||||
listener = self.dispatcher._listen(self.transport)
|
||||
except driver_base.TransportDriverError as ex:
|
||||
raise ServerListenError(self.target, ex)
|
||||
|
||||
self._executor = self._executor_cls(self.conf, listener,
|
||||
self.dispatcher)
|
||||
self._executor.start()
|
||||
|
||||
def stop(self):
|
||||
"""Stop handling incoming messages.
|
||||
|
||||
Once this method returns, no new incoming messages will be handled by
|
||||
the server. However, the server may still be in the process of handling
|
||||
some messages, and underlying driver resources associated with this
|
||||
server are still in use. See 'wait' for more details.
|
||||
"""
|
||||
if self._executor is not None:
|
||||
self._executor.stop()
|
||||
|
||||
def wait(self):
|
||||
"""Wait for message processing to complete.
|
||||
|
||||
After calling stop(), there may still be some existing messages
|
||||
which have not been completely processed. The wait() method blocks
|
||||
until all message processing has completed.
|
||||
|
||||
Once it's finished, the underlying driver resources associated with this
server are released (for example, network connections that are no longer
needed are closed).
|
||||
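In other words, a graceful shutdown is usually performed as (a brief
sketch)::

    server.stop()
    server.wait()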
"""
|
||||
if self._executor is not None:
|
||||
self._executor.wait()
|
||||
# Close listener connection after processing all messages
|
||||
self._executor.listener.cleanup()
|
||||
|
||||
self._executor = None
|
94
oslo_messaging/target.py
Normal file
@ -0,0 +1,94 @@
|
||||
|
||||
# Copyright 2013 Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
class Target(object):
|
||||
|
||||
"""Identifies the destination of messages.
|
||||
|
||||
A Target encapsulates all the information to identify where a message
|
||||
should be sent or what messages a server is listening for.
|
||||
|
||||
Different subsets of the information encapsulated in a Target object are
|
||||
relevant to various aspects of the API:
|
||||
|
||||
creating a server:
|
||||
topic and server are required; exchange is optional
|
||||
an endpoint's target:
|
||||
namespace and version are optional
|
||||
client sending a message:
|
||||
topic is required, all other attributes optional
|
||||
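As a rough sketch of these three cases (illustrative values only)::

    # server side: listen on a topic as a named server
    Target(topic='compute', server='host-1')

    # an endpoint's target: expose a namespaced, versioned interface
    Target(namespace='control', version='2.0')

    # client side: fanout a cast to every server on the topic
    Target(topic='compute', fanout=True)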
|
||||
Its attributes are:
|
||||
|
||||
:param exchange: A scope for topics. Leave unspecified to default to the
|
||||
control_exchange configuration option.
|
||||
:type exchange: str
|
||||
:param topic: A name which identifies the set of interfaces exposed by a
|
||||
server. Multiple servers may listen on a topic and messages will be
|
||||
dispatched to one of the servers in a round-robin fashion.
|
||||
:type topic: str
|
||||
:param namespace: Identifies a particular interface (i.e. set of methods)
|
||||
exposed by a server. The default interface has no namespace identifier
|
||||
and is referred to as the null namespace.
|
||||
:type namespace: str
|
||||
:param version: Interfaces have a major.minor version number associated
|
||||
with them. A minor number increment indicates a backwards compatible
|
||||
change and an incompatible change is indicated by a major number bump.
|
||||
Servers may implement multiple major versions and clients may indicate
that their message requires a particular minimum minor version.
|
||||
:type version: str
|
||||
:param server: Clients can request that a message be directed to a specific
|
||||
server, rather than just one of a pool of servers listening on the topic.
|
||||
:type server: str
|
||||
:param fanout: Clients may request that a message be directed to all
|
||||
servers listening on a topic by setting fanout to ``True``, rather than
|
||||
just one of them.
|
||||
:type fanout: bool
|
||||
"""
|
||||
|
||||
def __init__(self, exchange=None, topic=None, namespace=None,
|
||||
version=None, server=None, fanout=None):
|
||||
self.exchange = exchange
|
||||
self.topic = topic
|
||||
self.namespace = namespace
|
||||
self.version = version
|
||||
self.server = server
|
||||
self.fanout = fanout
|
||||
|
||||
def __call__(self, **kwargs):
|
||||
for a in ('exchange', 'topic', 'namespace',
|
||||
'version', 'server', 'fanout'):
|
||||
kwargs.setdefault(a, getattr(self, a))
|
||||
return Target(**kwargs)
|
||||
|
||||
def __eq__(self, other):
|
||||
return vars(self) == vars(other)
|
||||
|
||||
def __ne__(self, other):
|
||||
return not self == other
|
||||
|
||||
def __repr__(self):
|
||||
attrs = []
|
||||
for a in ['exchange', 'topic', 'namespace',
|
||||
'version', 'server', 'fanout']:
|
||||
v = getattr(self, a)
|
||||
if v:
|
||||
attrs.append((a, v))
|
||||
values = ', '.join(['%s=%s' % i for i in attrs])
|
||||
return '<Target ' + values + '>'
|
||||
|
||||
def __hash__(self):
|
||||
return id(self)
|
1
oslo_messaging/tests/__init__.py
Normal file
@ -0,0 +1 @@
|
||||
|
841
oslo_messaging/tests/drivers/test_impl_qpid.py
Normal file
@ -0,0 +1,841 @@
|
||||
# Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import operator
|
||||
import random
|
||||
import threading
|
||||
import time
|
||||
|
||||
import mock
|
||||
try:
|
||||
import qpid
|
||||
except ImportError:
|
||||
qpid = None
|
||||
from six.moves import _thread
|
||||
import testscenarios
|
||||
import testtools
|
||||
|
||||
import oslo_messaging
|
||||
from oslo_messaging._drivers import impl_qpid as qpid_driver
|
||||
from oslo_messaging.tests import utils as test_utils
|
||||
|
||||
|
||||
load_tests = testscenarios.load_tests_apply_scenarios
|
||||
|
||||
QPID_BROKER = 'localhost:5672'
|
||||
|
||||
|
||||
class TestQpidDriverLoad(test_utils.BaseTestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(TestQpidDriverLoad, self).setUp()
|
||||
self.messaging_conf.transport_driver = 'qpid'
|
||||
|
||||
def test_driver_load(self):
|
||||
transport = oslo_messaging.get_transport(self.conf)
|
||||
self.assertIsInstance(transport._driver, qpid_driver.QpidDriver)
|
||||
|
||||
|
||||
def _is_qpidd_service_running():
|
||||
|
||||
"""this function checks if the qpid service is running or not."""
|
||||
|
||||
qpid_running = True
|
||||
try:
|
||||
broker = QPID_BROKER
|
||||
connection = qpid.messaging.Connection(broker)
|
||||
connection.open()
|
||||
except Exception:
|
||||
# qpid service is not running.
|
||||
qpid_running = False
|
||||
else:
|
||||
connection.close()
|
||||
|
||||
return qpid_running
|
||||
|
||||
|
||||
class _QpidBaseTestCase(test_utils.BaseTestCase):
|
||||
|
||||
@testtools.skipIf(qpid is None, "qpid not available")
|
||||
def setUp(self):
|
||||
super(_QpidBaseTestCase, self).setUp()
|
||||
self.messaging_conf.transport_driver = 'qpid'
|
||||
self.fake_qpid = not _is_qpidd_service_running()
|
||||
|
||||
if self.fake_qpid:
|
||||
self.session_receive = get_fake_qpid_session()
|
||||
self.session_send = get_fake_qpid_session()
|
||||
else:
|
||||
self.broker = QPID_BROKER
|
||||
# create connection from the qpid.messaging
|
||||
# connection for the Consumer.
|
||||
self.con_receive = qpid.messaging.Connection(self.broker)
|
||||
self.con_receive.open()
|
||||
# session to receive the messages
|
||||
self.session_receive = self.con_receive.session()
|
||||
|
||||
# connection for sending the message
|
||||
self.con_send = qpid.messaging.Connection(self.broker)
|
||||
self.con_send.open()
|
||||
# session to send the messages
|
||||
self.session_send = self.con_send.session()
|
||||
|
||||
# list to store the expected messages and
|
||||
# the actual received messages
|
||||
self._expected = []
|
||||
self._messages = []
|
||||
self.initialized = True
|
||||
|
||||
def tearDown(self):
|
||||
super(_QpidBaseTestCase, self).tearDown()
|
||||
|
||||
if self.initialized:
|
||||
if self.fake_qpid:
|
||||
_fake_session.flush_exchanges()
|
||||
else:
|
||||
self.con_receive.close()
|
||||
self.con_send.close()
|
||||
|
||||
|
||||
class TestQpidTransportURL(_QpidBaseTestCase):
|
||||
|
||||
scenarios = [
|
||||
('none', dict(url=None,
|
||||
expected=[dict(host='localhost:5672',
|
||||
username='',
|
||||
password='')])),
|
||||
('empty',
|
||||
dict(url='qpid:///',
|
||||
expected=[dict(host='localhost:5672',
|
||||
username='',
|
||||
password='')])),
|
||||
('localhost',
|
||||
dict(url='qpid://localhost/',
|
||||
expected=[dict(host='localhost',
|
||||
username='',
|
||||
password='')])),
|
||||
('no_creds',
|
||||
dict(url='qpid://host/',
|
||||
expected=[dict(host='host',
|
||||
username='',
|
||||
password='')])),
|
||||
('no_port',
|
||||
dict(url='qpid://user:password@host/',
|
||||
expected=[dict(host='host',
|
||||
username='user',
|
||||
password='password')])),
|
||||
('full_url',
|
||||
dict(url='qpid://user:password@host:10/',
|
||||
expected=[dict(host='host:10',
|
||||
username='user',
|
||||
password='password')])),
|
||||
('full_two_url',
|
||||
dict(url='qpid://user:password@host:10,'
|
||||
'user2:password2@host2:12/',
|
||||
expected=[dict(host='host:10',
|
||||
username='user',
|
||||
password='password'),
|
||||
dict(host='host2:12',
|
||||
username='user2',
|
||||
password='password2')
|
||||
]
|
||||
)),
|
||||
|
||||
]
|
||||
|
||||
@mock.patch.object(qpid_driver.Connection, 'reconnect')
|
||||
def test_transport_url(self, *args):
|
||||
transport = oslo_messaging.get_transport(self.conf, self.url)
|
||||
self.addCleanup(transport.cleanup)
|
||||
driver = transport._driver
|
||||
|
||||
brokers_params = driver._get_connection().brokers_params
|
||||
self.assertEqual(sorted(self.expected,
|
||||
key=operator.itemgetter('host')),
|
||||
sorted(brokers_params,
|
||||
key=operator.itemgetter('host')))
|
||||
|
||||
|
||||
class TestQpidInvalidTopologyVersion(_QpidBaseTestCase):
|
||||
"""Unit test cases to test invalid qpid topology version."""
|
||||
|
||||
scenarios = [
|
||||
('direct', dict(consumer_cls=qpid_driver.DirectConsumer,
|
||||
consumer_kwargs={},
|
||||
publisher_cls=qpid_driver.DirectPublisher,
|
||||
publisher_kwargs={})),
|
||||
('topic', dict(consumer_cls=qpid_driver.TopicConsumer,
|
||||
consumer_kwargs={'exchange_name': 'openstack'},
|
||||
publisher_cls=qpid_driver.TopicPublisher,
|
||||
publisher_kwargs={'exchange_name': 'openstack'})),
|
||||
('fanout', dict(consumer_cls=qpid_driver.FanoutConsumer,
|
||||
consumer_kwargs={},
|
||||
publisher_cls=qpid_driver.FanoutPublisher,
|
||||
publisher_kwargs={})),
|
||||
]
|
||||
|
||||
def setUp(self):
|
||||
super(TestQpidInvalidTopologyVersion, self).setUp()
|
||||
self.config(qpid_topology_version=-1)
|
||||
|
||||
def test_invalid_topology_version(self):
|
||||
def consumer_callback(msg):
|
||||
pass
|
||||
|
||||
msgid_or_topic = 'test'
|
||||
|
||||
# not using self.assertRaises because
|
||||
# 1. qpid driver raises Exception(msg) for invalid topology version
|
||||
# 2. flake8 - H202 assertRaises Exception too broad
|
||||
exception_msg = ("Invalid value for qpid_topology_version: %d" %
|
||||
self.conf.qpid_topology_version)
|
||||
recvd_exc_msg = ''
|
||||
|
||||
try:
|
||||
self.consumer_cls(self.conf,
|
||||
self.session_receive,
|
||||
msgid_or_topic,
|
||||
consumer_callback,
|
||||
**self.consumer_kwargs)
|
||||
except Exception as e:
|
||||
recvd_exc_msg = e.message
|
||||
|
||||
self.assertEqual(exception_msg, recvd_exc_msg)
|
||||
|
||||
recvd_exc_msg = ''
|
||||
try:
|
||||
self.publisher_cls(self.conf,
|
||||
self.session_send,
|
||||
topic=msgid_or_topic,
|
||||
**self.publisher_kwargs)
|
||||
except Exception as e:
|
||||
recvd_exc_msg = e.message
|
||||
|
||||
self.assertEqual(exception_msg, recvd_exc_msg)
|
||||
|
||||
|
||||
class TestQpidDirectConsumerPublisher(_QpidBaseTestCase):
|
||||
"""Unit test cases to test DirectConsumer and Direct Publisher."""
|
||||
|
||||
_n_qpid_topology = [
|
||||
('v1', dict(qpid_topology=1)),
|
||||
('v2', dict(qpid_topology=2)),
|
||||
]
|
||||
|
||||
_n_msgs = [
|
||||
('single', dict(no_msgs=1)),
|
||||
('multiple', dict(no_msgs=10)),
|
||||
]
|
||||
|
||||
@classmethod
|
||||
def generate_scenarios(cls):
|
||||
cls.scenarios = testscenarios.multiply_scenarios(cls._n_qpid_topology,
|
||||
cls._n_msgs)
|
||||
|
||||
def consumer_callback(self, msg):
|
||||
# This function will be called by the DirectConsumer
|
||||
# when any message is received.
|
||||
# Append the received message into the messages list
|
||||
# so that the received messages can be validated
|
||||
# with the expected messages
|
||||
if isinstance(msg, dict):
|
||||
self._messages.append(msg['content'])
|
||||
else:
|
||||
self._messages.append(msg)
|
||||
|
||||
def test_qpid_direct_consumer_producer(self):
|
||||
self.msgid = str(random.randint(1, 100))
|
||||
|
||||
# create a DirectConsumer and DirectPublisher class objects
|
||||
self.dir_cons = qpid_driver.DirectConsumer(self.conf,
|
||||
self.session_receive,
|
||||
self.msgid,
|
||||
self.consumer_callback)
|
||||
self.dir_pub = qpid_driver.DirectPublisher(self.conf,
|
||||
self.session_send,
|
||||
self.msgid)
|
||||
|
||||
def try_send_msg(no_msgs):
|
||||
for i in range(no_msgs):
|
||||
self._expected.append(str(i))
|
||||
snd_msg = {'content_type': 'text/plain', 'content': str(i)}
|
||||
self.dir_pub.send(snd_msg)
|
||||
|
||||
def try_receive_msg(no_msgs):
|
||||
for i in range(no_msgs):
|
||||
self.dir_cons.consume()
|
||||
|
||||
thread1 = threading.Thread(target=try_receive_msg,
|
||||
args=(self.no_msgs,))
|
||||
thread2 = threading.Thread(target=try_send_msg,
|
||||
args=(self.no_msgs,))
|
||||
|
||||
thread1.start()
|
||||
thread2.start()
|
||||
thread1.join()
|
||||
thread2.join()
|
||||
|
||||
self.assertEqual(self.no_msgs, len(self._messages))
|
||||
self.assertEqual(self._expected, self._messages)
|
||||
|
||||
|
||||
TestQpidDirectConsumerPublisher.generate_scenarios()
|
||||
|
||||
|
||||
class TestQpidTopicAndFanout(_QpidBaseTestCase):
|
||||
"""Unit Test cases to test TopicConsumer and
|
||||
TopicPublisher classes of the qpid driver
|
||||
and FanoutConsumer and FanoutPublisher classes
|
||||
of the qpid driver
|
||||
"""
|
||||
|
||||
_n_qpid_topology = [
|
||||
('v1', dict(qpid_topology=1)),
|
||||
('v2', dict(qpid_topology=2)),
|
||||
]
|
||||
|
||||
_n_msgs = [
|
||||
('single', dict(no_msgs=1)),
|
||||
('multiple', dict(no_msgs=10)),
|
||||
]
|
||||
|
||||
_n_senders = [
|
||||
('single', dict(no_senders=1)),
|
||||
('multiple', dict(no_senders=10)),
|
||||
]
|
||||
|
||||
_n_receivers = [
|
||||
('single', dict(no_receivers=1)),
|
||||
]
|
||||
_exchange_class = [
|
||||
('topic', dict(consumer_cls=qpid_driver.TopicConsumer,
|
||||
consumer_kwargs={'exchange_name': 'openstack'},
|
||||
publisher_cls=qpid_driver.TopicPublisher,
|
||||
publisher_kwargs={'exchange_name': 'openstack'},
|
||||
topic='topictest.test',
|
||||
receive_topic='topictest.test')),
|
||||
('fanout', dict(consumer_cls=qpid_driver.FanoutConsumer,
|
||||
consumer_kwargs={},
|
||||
publisher_cls=qpid_driver.FanoutPublisher,
|
||||
publisher_kwargs={},
|
||||
topic='fanouttest',
|
||||
receive_topic='fanouttest')),
|
||||
]
|
||||
|
||||
@classmethod
|
||||
def generate_scenarios(cls):
|
||||
cls.scenarios = testscenarios.multiply_scenarios(cls._n_qpid_topology,
|
||||
cls._n_msgs,
|
||||
cls._n_senders,
|
||||
cls._n_receivers,
|
||||
cls._exchange_class)
|
||||
|
||||
def setUp(self):
|
||||
super(TestQpidTopicAndFanout, self).setUp()
|
||||
|
||||
# to store the expected messages and the
|
||||
# actual received messages
|
||||
#
|
||||
# NOTE(dhellmann): These are dicts, where the base class uses
|
||||
# lists.
|
||||
self._expected = {}
|
||||
self._messages = {}
|
||||
|
||||
self._senders = []
|
||||
self._receivers = []
|
||||
|
||||
self._sender_threads = []
|
||||
self._receiver_threads = []
|
||||
|
||||
def consumer_callback(self, msg):
|
||||
"""callback function called by the ConsumerBase class of
|
||||
qpid driver.
|
||||
Message will be received in the format x-y
|
||||
where x is the sender id and y is the msg number of the sender
|
||||
extract the sender id 'x' and store the msg 'x-y' with 'x' as
|
||||
the key
|
||||
"""
|
||||
|
||||
if isinstance(msg, dict):
|
||||
msgcontent = msg['content']
|
||||
else:
|
||||
msgcontent = msg
|
||||
|
||||
splitmsg = msgcontent.split('-')
|
||||
key = _thread.get_ident()
|
||||
|
||||
if key not in self._messages:
|
||||
self._messages[key] = dict()
|
||||
|
||||
tdict = self._messages[key]
|
||||
|
||||
if splitmsg[0] not in tdict:
|
||||
tdict[splitmsg[0]] = []
|
||||
|
||||
tdict[splitmsg[0]].append(msgcontent)
|
||||
|
||||
def _try_send_msg(self, sender_id, no_msgs):
|
||||
for i in range(no_msgs):
|
||||
sendmsg = '%s-%s' % (str(sender_id), str(i))
|
||||
key = str(sender_id)
|
||||
# Store the message in the self._expected for each sender.
|
||||
# This will be used later to
|
||||
# validate the test by comparing it with the
|
||||
# received messages by all the receivers
|
||||
if key not in self._expected:
|
||||
self._expected[key] = []
|
||||
self._expected[key].append(sendmsg)
|
||||
send_dict = {'content_type': 'text/plain', 'content': sendmsg}
|
||||
self._senders[sender_id].send(send_dict)
|
||||
|
||||
def _try_receive_msg(self, receiver_id, no_msgs):
|
||||
for i in range(self.no_senders * no_msgs):
|
||||
no_of_attempts = 0
|
||||
|
||||
# ConsumerBase.consume blocks indefinitely until a message
|
||||
# is received.
|
||||
# So qpid_receiver.available() is called before calling
|
||||
# ConsumerBase.consume() so that we are not
|
||||
# blocked indefinitely
|
||||
qpid_receiver = self._receivers[receiver_id].get_receiver()
|
||||
while no_of_attempts < 50:
|
||||
if qpid_receiver.available() > 0:
|
||||
self._receivers[receiver_id].consume()
|
||||
break
|
||||
no_of_attempts += 1
|
||||
time.sleep(0.05)
|
||||
|
||||
def test_qpid_topic_and_fanout(self):
|
||||
for receiver_id in range(self.no_receivers):
|
||||
consumer = self.consumer_cls(self.conf,
|
||||
self.session_receive,
|
||||
self.receive_topic,
|
||||
self.consumer_callback,
|
||||
**self.consumer_kwargs)
|
||||
self._receivers.append(consumer)
|
||||
|
||||
# create receivers threads
|
||||
thread = threading.Thread(target=self._try_receive_msg,
|
||||
args=(receiver_id, self.no_msgs,))
|
||||
self._receiver_threads.append(thread)
|
||||
|
||||
for sender_id in range(self.no_senders):
|
||||
publisher = self.publisher_cls(self.conf,
|
||||
self.session_send,
|
||||
topic=self.topic,
|
||||
**self.publisher_kwargs)
|
||||
self._senders.append(publisher)
|
||||
|
||||
# create sender threads
|
||||
thread = threading.Thread(target=self._try_send_msg,
|
||||
args=(sender_id, self.no_msgs,))
|
||||
self._sender_threads.append(thread)
|
||||
|
||||
for thread in self._receiver_threads:
|
||||
thread.start()
|
||||
|
||||
for thread in self._sender_threads:
|
||||
thread.start()
|
||||
|
||||
for thread in self._receiver_threads:
|
||||
thread.join()
|
||||
|
||||
for thread in self._sender_threads:
|
||||
thread.join()
|
||||
|
||||
# Each receiver should receive all the messages sent by
|
||||
# the sender(s).
|
||||
# So, iterate through each of the receiver items in
# self._messages and compare with the expected messages.
|
||||
|
||||
self.assertEqual(self.no_senders, len(self._expected))
|
||||
self.assertEqual(self.no_receivers, len(self._messages))
|
||||
|
||||
for key, messages in self._messages.iteritems():
|
||||
self.assertEqual(self._expected, messages)
|
||||
|
||||
TestQpidTopicAndFanout.generate_scenarios()
|
||||
|
||||
|
||||
class AddressNodeMatcher(object):
|
||||
def __init__(self, node):
|
||||
self.node = node
|
||||
|
||||
def __eq__(self, address):
|
||||
return address.split(';')[0].strip() == self.node
|
||||
|
||||
|
||||
class TestDriverInterface(_QpidBaseTestCase):
|
||||
"""Unit Test cases to test the amqpdriver with qpid
|
||||
"""
|
||||
|
||||
def setUp(self):
|
||||
super(TestDriverInterface, self).setUp()
|
||||
self.config(qpid_topology_version=2)
|
||||
transport = oslo_messaging.get_transport(self.conf)
|
||||
self.driver = transport._driver
|
||||
|
||||
original_get_connection = self.driver._get_connection
|
||||
p = mock.patch.object(self.driver, '_get_connection',
|
||||
side_effect=lambda pooled=True:
|
||||
original_get_connection(False))
|
||||
p.start()
|
||||
self.addCleanup(p.stop)
|
||||
|
||||
def test_listen_and_direct_send(self):
|
||||
target = oslo_messaging.Target(exchange="exchange_test",
|
||||
topic="topic_test",
|
||||
server="server_test")
|
||||
|
||||
with mock.patch('qpid.messaging.Connection') as conn_cls:
|
||||
conn = conn_cls.return_value
|
||||
session = conn.session.return_value
|
||||
session.receiver.side_effect = [mock.Mock(), mock.Mock(),
|
||||
mock.Mock()]
|
||||
|
||||
listener = self.driver.listen(target)
|
||||
listener.conn.direct_send("msg_id", {})
|
||||
|
||||
self.assertEqual(3, len(listener.conn.consumers))
|
||||
|
||||
expected_calls = [
|
||||
mock.call(AddressNodeMatcher(
|
||||
'amq.topic/topic/exchange_test/topic_test')),
|
||||
mock.call(AddressNodeMatcher(
|
||||
'amq.topic/topic/exchange_test/topic_test.server_test')),
|
||||
mock.call(AddressNodeMatcher('amq.topic/fanout/topic_test')),
|
||||
]
|
||||
session.receiver.assert_has_calls(expected_calls)
|
||||
session.sender.assert_called_with(
|
||||
AddressNodeMatcher("amq.direct/msg_id"))
|
||||
|
||||
def test_send(self):
|
||||
target = oslo_messaging.Target(exchange="exchange_test",
|
||||
topic="topic_test",
|
||||
server="server_test")
|
||||
with mock.patch('qpid.messaging.Connection') as conn_cls:
|
||||
conn = conn_cls.return_value
|
||||
session = conn.session.return_value
|
||||
|
||||
self.driver.send(target, {}, {})
|
||||
session.sender.assert_called_with(AddressNodeMatcher(
|
||||
"amq.topic/topic/exchange_test/topic_test.server_test"))
|
||||
|
||||
def test_send_notification(self):
|
||||
target = oslo_messaging.Target(exchange="exchange_test",
|
||||
topic="topic_test.info")
|
||||
with mock.patch('qpid.messaging.Connection') as conn_cls:
|
||||
conn = conn_cls.return_value
|
||||
session = conn.session.return_value
|
||||
|
||||
self.driver.send_notification(target, {}, {}, "2.0")
|
||||
session.sender.assert_called_with(AddressNodeMatcher(
|
||||
"amq.topic/topic/exchange_test/topic_test.info"))
|
||||
|
||||
|
||||
class TestQpidReconnectOrder(test_utils.BaseTestCase):
|
||||
"""Unit Test cases to test reconnection
|
||||
"""
|
||||
|
||||
@testtools.skipIf(qpid is None, "qpid not available")
|
||||
def test_reconnect_order(self):
|
||||
brokers = ['host1', 'host2', 'host3', 'host4', 'host5']
|
||||
brokers_count = len(brokers)
|
||||
|
||||
self.config(qpid_hosts=brokers)
|
||||
|
||||
with mock.patch('qpid.messaging.Connection') as conn_mock:
|
||||
# starting from the first broker in the list
|
||||
url = oslo_messaging.TransportURL.parse(self.conf, None)
|
||||
connection = qpid_driver.Connection(self.conf, url)
|
||||
|
||||
# reconnect will advance to the next broker, one broker per
|
||||
# attempt, and then wrap to the start of the list once the end is
|
||||
# reached
|
||||
for _ in range(brokers_count):
|
||||
connection.reconnect()
|
||||
|
||||
expected = []
|
||||
for broker in brokers:
|
||||
expected.extend([mock.call("%s:5672" % broker),
|
||||
mock.call().open(),
|
||||
mock.call().session(),
|
||||
mock.call().opened(),
|
||||
mock.call().opened().__nonzero__(),
|
||||
mock.call().close()])
|
||||
|
||||
conn_mock.assert_has_calls(expected, any_order=True)
|
||||
|
||||
|
||||
def synchronized(func):
|
||||
func.__lock__ = threading.Lock()
|
||||
|
||||
def synced_func(*args, **kws):
|
||||
with func.__lock__:
|
||||
return func(*args, **kws)
|
||||
|
||||
return synced_func
|
||||
|
||||
|
||||
class FakeQpidMsgManager(object):
|
||||
def __init__(self):
|
||||
self._exchanges = {}
|
||||
|
||||
@synchronized
|
||||
def add_exchange(self, exchange):
|
||||
if exchange not in self._exchanges:
|
||||
self._exchanges[exchange] = {'msgs': [], 'consumers': {}}
|
||||
|
||||
@synchronized
|
||||
def add_exchange_consumer(self, exchange, consumer_id):
|
||||
exchange_info = self._exchanges[exchange]
|
||||
cons_dict = exchange_info['consumers']
|
||||
cons_dict[consumer_id] = 0
|
||||
|
||||
@synchronized
|
||||
def add_exchange_msg(self, exchange, msg):
|
||||
exchange_info = self._exchanges[exchange]
|
||||
exchange_info['msgs'].append(msg)
|
||||
|
||||
def get_exchange_msg(self, exchange, index):
|
||||
exchange_info = self._exchanges[exchange]
|
||||
return exchange_info['msgs'][index]
|
||||
|
||||
def get_no_exch_msgs(self, exchange):
|
||||
exchange_info = self._exchanges[exchange]
|
||||
return len(exchange_info['msgs'])
|
||||
|
||||
def get_exch_cons_index(self, exchange, consumer_id):
|
||||
exchange_info = self._exchanges[exchange]
|
||||
cons_dict = exchange_info['consumers']
|
||||
return cons_dict[consumer_id]
|
||||
|
||||
@synchronized
|
||||
def inc_consumer_index(self, exchange, consumer_id):
|
||||
exchange_info = self._exchanges[exchange]
|
||||
cons_dict = exchange_info['consumers']
|
||||
cons_dict[consumer_id] += 1
|
||||
|
||||
_fake_qpid_msg_manager = FakeQpidMsgManager()
|
||||
|
||||
|
||||
class FakeQpidSessionSender(object):
|
||||
def __init__(self, session, id, target, options):
|
||||
self.session = session
|
||||
self.id = id
|
||||
self.target = target
|
||||
self.options = options
|
||||
|
||||
@synchronized
|
||||
def send(self, object, sync=True, timeout=None):
|
||||
_fake_qpid_msg_manager.add_exchange_msg(self.target, object)
|
||||
|
||||
def close(self, timeout=None):
|
||||
pass
|
||||
|
||||
|
||||
class FakeQpidSessionReceiver(object):
|
||||
|
||||
def __init__(self, session, id, source, options):
|
||||
self.session = session
|
||||
self.id = id
|
||||
self.source = source
|
||||
self.options = options
|
||||
|
||||
@synchronized
|
||||
def fetch(self, timeout=None):
|
||||
if timeout is None:
|
||||
# if timeout is not given, take a default time out
|
||||
# of 30 seconds to avoid indefinite loop
|
||||
_timeout = 30
|
||||
else:
|
||||
_timeout = timeout
|
||||
|
||||
deadline = time.time() + _timeout
|
||||
while time.time() <= deadline:
|
||||
index = _fake_qpid_msg_manager.get_exch_cons_index(self.source,
|
||||
self.id)
|
||||
try:
|
||||
msg = _fake_qpid_msg_manager.get_exchange_msg(self.source,
|
||||
index)
|
||||
except IndexError:
|
||||
pass
|
||||
else:
|
||||
_fake_qpid_msg_manager.inc_consumer_index(self.source,
|
||||
self.id)
|
||||
return qpid.messaging.Message(msg)
|
||||
time.sleep(0.050)
|
||||
|
||||
if timeout is None:
|
||||
raise Exception('timed out waiting for reply')
|
||||
|
||||
def close(self, timeout=None):
|
||||
pass
|
||||
|
||||
@synchronized
|
||||
def available(self):
|
||||
no_msgs = _fake_qpid_msg_manager.get_no_exch_msgs(self.source)
|
||||
index = _fake_qpid_msg_manager.get_exch_cons_index(self.source,
|
||||
self.id)
|
||||
if no_msgs == 0 or index >= no_msgs:
|
||||
return 0
|
||||
else:
|
||||
return no_msgs - index
|
||||
|
||||
|
||||
class FakeQpidSession(object):
|
||||
|
||||
def __init__(self, connection=None, name=None, transactional=None):
|
||||
self.connection = connection
|
||||
self.name = name
|
||||
self.transactional = transactional
|
||||
self._receivers = {}
|
||||
self.conf = None
|
||||
self.url = None
|
||||
self._senders = {}
|
||||
self._sender_id = 0
|
||||
self._receiver_id = 0
|
||||
|
||||
@synchronized
|
||||
def sender(self, target, **options):
|
||||
exchange_key = self._extract_exchange_key(target)
|
||||
_fake_qpid_msg_manager.add_exchange(exchange_key)
|
||||
|
||||
sendobj = FakeQpidSessionSender(self, self._sender_id,
|
||||
exchange_key, options)
|
||||
self._senders[self._sender_id] = sendobj
|
||||
self._sender_id = self._sender_id + 1
|
||||
return sendobj
|
||||
|
||||
@synchronized
|
||||
def receiver(self, source, **options):
|
||||
exchange_key = self._extract_exchange_key(source)
|
||||
_fake_qpid_msg_manager.add_exchange(exchange_key)
|
||||
recvobj = FakeQpidSessionReceiver(self, self._receiver_id,
|
||||
exchange_key, options)
|
||||
self._receivers[self._receiver_id] = recvobj
|
||||
_fake_qpid_msg_manager.add_exchange_consumer(exchange_key,
|
||||
self._receiver_id)
|
||||
self._receiver_id += 1
|
||||
return recvobj
|
||||
|
||||
def acknowledge(self, message=None, disposition=None, sync=True):
|
||||
pass
|
||||
|
||||
@synchronized
|
||||
def flush_exchanges(self):
|
||||
_fake_qpid_msg_manager._exchanges = {}
|
||||
|
||||
def _extract_exchange_key(self, exchange_msg):
|
||||
"""This function extracts a unique key for the exchange.
|
||||
This key is used in the dictionary as a 'key' for
|
||||
this exchange.
|
||||
Eg. if the exchange_msg (for qpid topology version 1)
|
||||
is 33/33 ; {"node": {"x-declare": {"auto-delete": true, ....
|
||||
then 33 is returned as the key.
|
||||
Eg 2. For topology v2, if the
|
||||
exchange_msg is - amq.direct/44 ; {"link": {"x-dec.......
|
||||
then 44 is returned
|
||||
"""
|
||||
# first check for ';'
|
||||
semicolon_split = exchange_msg.split(';')
|
||||
|
||||
# split the first item of semicolon_split with '/'
|
||||
slash_split = semicolon_split[0].split('/')
|
||||
# return the last element of the list as the key
|
||||
key = slash_split[-1]
|
||||
return key.strip()
|
||||
|
||||
def close(self):
|
||||
pass
|
||||
|
||||
_fake_session = FakeQpidSession()
|
||||
|
||||
|
||||
def get_fake_qpid_session():
|
||||
return _fake_session
|
||||
|
||||
|
||||
class QPidHATestCase(test_utils.BaseTestCase):
|
||||
|
||||
@testtools.skipIf(qpid is None, "qpid not available")
|
||||
def setUp(self):
|
||||
super(QPidHATestCase, self).setUp()
|
||||
self.brokers = ['host1', 'host2', 'host3', 'host4', 'host5']
|
||||
|
||||
self.config(qpid_hosts=self.brokers,
|
||||
qpid_username=None,
|
||||
qpid_password=None)
|
||||
|
||||
hostname_sets = set()
|
||||
self.info = {'attempt': 0,
|
||||
'fail': False}
|
||||
|
||||
def _connect(myself, broker):
|
||||
# do just enough work for the connection attempt to succeed
|
||||
myself.connection = mock.Mock()
|
||||
hostname = broker['host']
|
||||
self.assertNotIn(hostname, hostname_sets)
|
||||
hostname_sets.add(hostname)
|
||||
|
||||
self.info['attempt'] += 1
|
||||
if self.info['fail']:
|
||||
raise qpid.messaging.exceptions.ConnectionError
|
||||
|
||||
# just make sure connection instantiation does not fail with an
|
||||
# exception
|
||||
self.stubs.Set(qpid_driver.Connection, '_connect', _connect)
|
||||
|
||||
# starting from the first broker in the list
|
||||
url = oslo_messaging.TransportURL.parse(self.conf, None)
|
||||
self.connection = qpid_driver.Connection(self.conf, url)
|
||||
self.addCleanup(self.connection.close)
|
||||
|
||||
self.info.update({'attempt': 0,
|
||||
'fail': True})
|
||||
hostname_sets.clear()
|
||||
|
||||
def test_reconnect_order(self):
|
||||
self.assertRaises(oslo_messaging.MessageDeliveryFailure,
|
||||
self.connection.reconnect,
|
||||
retry=len(self.brokers) - 1)
|
||||
self.assertEqual(len(self.brokers), self.info['attempt'])
|
||||
|
||||
def test_ensure_four_retries(self):
|
||||
mock_callback = mock.Mock(
|
||||
side_effect=qpid.messaging.exceptions.ConnectionError)
|
||||
self.assertRaises(oslo_messaging.MessageDeliveryFailure,
|
||||
self.connection.ensure, None, mock_callback,
|
||||
retry=4)
|
||||
self.assertEqual(5, self.info['attempt'])
|
||||
self.assertEqual(1, mock_callback.call_count)
|
||||
|
||||
def test_ensure_one_retry(self):
|
||||
mock_callback = mock.Mock(
|
||||
side_effect=qpid.messaging.exceptions.ConnectionError)
|
||||
self.assertRaises(oslo_messaging.MessageDeliveryFailure,
|
||||
self.connection.ensure, None, mock_callback,
|
||||
retry=1)
|
||||
self.assertEqual(2, self.info['attempt'])
|
||||
self.assertEqual(1, mock_callback.call_count)
|
||||
|
||||
def test_ensure_no_retry(self):
|
||||
mock_callback = mock.Mock(
|
||||
side_effect=qpid.messaging.exceptions.ConnectionError)
|
||||
self.assertRaises(oslo_messaging.MessageDeliveryFailure,
|
||||
self.connection.ensure, None, mock_callback,
|
||||
retry=0)
|
||||
self.assertEqual(1, self.info['attempt'])
|
||||
self.assertEqual(1, mock_callback.call_count)
|
712
oslo_messaging/tests/drivers/test_impl_rabbit.py
Normal file
@ -0,0 +1,712 @@
|
||||
# Copyright 2013 Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import datetime
|
||||
import sys
|
||||
import threading
|
||||
import time
|
||||
import uuid
|
||||
|
||||
import fixtures
|
||||
import kombu
|
||||
import mock
|
||||
from oslotest import mockpatch
|
||||
import testscenarios
|
||||
|
||||
from oslo.config import cfg
|
||||
from oslo.serialization import jsonutils
|
||||
import oslo_messaging
|
||||
from oslo_messaging._drivers import amqpdriver
|
||||
from oslo_messaging._drivers import common as driver_common
|
||||
from oslo_messaging._drivers import impl_rabbit as rabbit_driver
|
||||
from oslo_messaging.tests import utils as test_utils
|
||||
|
||||
load_tests = testscenarios.load_tests_apply_scenarios
|
||||
|
||||
|
||||
class TestDeprecatedRabbitDriverLoad(test_utils.BaseTestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(TestDeprecatedRabbitDriverLoad, self).setUp(
|
||||
conf=cfg.ConfigOpts())
|
||||
self.messaging_conf.transport_driver = 'rabbit'
|
||||
self.config(fake_rabbit=True)
|
||||
|
||||
def test_driver_load(self):
|
||||
transport = oslo_messaging.get_transport(self.conf)
|
||||
self.addCleanup(transport.cleanup)
|
||||
driver = transport._driver
|
||||
url = driver._get_connection()._url
|
||||
|
||||
self.assertIsInstance(driver, rabbit_driver.RabbitDriver)
|
||||
self.assertEqual('memory:////', url)
|
||||
|
||||
|
||||
class TestRabbitDriverLoad(test_utils.BaseTestCase):
|
||||
|
||||
scenarios = [
|
||||
('rabbit', dict(transport_driver='rabbit',
|
||||
url='amqp://guest:guest@localhost:5672//')),
|
||||
('kombu', dict(transport_driver='kombu',
|
||||
url='amqp://guest:guest@localhost:5672//')),
|
||||
('rabbit+memory', dict(transport_driver='kombu+memory',
|
||||
url='memory:///'))
|
||||
]
|
||||
|
||||
@mock.patch('oslo_messaging._drivers.impl_rabbit.Connection.ensure')
|
||||
@mock.patch('oslo_messaging._drivers.impl_rabbit.Connection.reset')
|
||||
def test_driver_load(self, fake_ensure, fake_reset):
|
||||
self.messaging_conf.transport_driver = self.transport_driver
|
||||
transport = oslo_messaging.get_transport(self.conf)
|
||||
self.addCleanup(transport.cleanup)
|
||||
driver = transport._driver
|
||||
url = driver._get_connection()._url
|
||||
|
||||
self.assertIsInstance(driver, rabbit_driver.RabbitDriver)
|
||||
self.assertEqual(self.url, url)
|
||||
|
||||
|
||||
class TestRabbitIterconsume(test_utils.BaseTestCase):
|
||||
|
||||
def test_iterconsume_timeout(self):
|
||||
transport = oslo_messaging.get_transport(self.conf,
|
||||
'kombu+memory:////')
|
||||
self.addCleanup(transport.cleanup)
|
||||
deadline = time.time() + 3
|
||||
with transport._driver._get_connection() as conn:
|
||||
conn.iterconsume(timeout=3)
|
||||
# kombu memory transport doesn't really raise error
|
||||
# so just simulate a real driver behavior
|
||||
conn.connection.connection.recoverable_channel_errors = (IOError,)
|
||||
conn.declare_fanout_consumer("notif.info", lambda msg: True)
|
||||
with mock.patch('kombu.connection.Connection.drain_events',
|
||||
side_effect=IOError):
|
||||
try:
|
||||
conn.consume(timeout=3)
|
||||
except driver_common.Timeout:
|
||||
pass
|
||||
|
||||
self.assertEqual(0, int(deadline - time.time()))
|
||||
|
||||
|
||||
class TestRabbitTransportURL(test_utils.BaseTestCase):
|
||||
|
||||
scenarios = [
|
||||
('none', dict(url=None,
|
||||
expected=["amqp://guest:guest@localhost:5672//"])),
|
||||
('memory', dict(url='kombu+memory:////',
|
||||
expected=["memory:///"])),
|
||||
('empty',
|
||||
dict(url='rabbit:///',
|
||||
expected=['amqp://guest:guest@localhost:5672/'])),
|
||||
('localhost',
|
||||
dict(url='rabbit://localhost/',
|
||||
expected=['amqp://:@localhost:5672/'])),
|
||||
('virtual_host',
|
||||
dict(url='rabbit:///vhost',
|
||||
expected=['amqp://guest:guest@localhost:5672/vhost'])),
|
||||
('no_creds',
|
||||
dict(url='rabbit://host/virtual_host',
|
||||
expected=['amqp://:@host:5672/virtual_host'])),
|
||||
('no_port',
|
||||
dict(url='rabbit://user:password@host/virtual_host',
|
||||
expected=['amqp://user:password@host:5672/virtual_host'])),
|
||||
('full_url',
|
||||
dict(url='rabbit://user:password@host:10/virtual_host',
|
||||
expected=['amqp://user:password@host:10/virtual_host'])),
|
||||
('full_two_url',
|
||||
dict(url='rabbit://user:password@host:10,'
|
||||
'user2:password2@host2:12/virtual_host',
|
||||
expected=["amqp://user:password@host:10/virtual_host",
|
||||
"amqp://user2:password2@host2:12/virtual_host"]
|
||||
)),
|
||||
]
|
||||
|
||||
def setUp(self):
|
||||
super(TestRabbitTransportURL, self).setUp()
|
||||
self.messaging_conf.transport_driver = 'rabbit'
|
||||
|
||||
@mock.patch('oslo_messaging._drivers.impl_rabbit.Connection.ensure')
|
||||
@mock.patch('oslo_messaging._drivers.impl_rabbit.Connection.reset')
|
||||
def test_transport_url(self, fake_ensure_connection, fake_reset):
|
||||
transport = oslo_messaging.get_transport(self.conf, self.url)
|
||||
self.addCleanup(transport.cleanup)
|
||||
driver = transport._driver
|
||||
|
||||
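# The connection's _url joins one amqp:// URL per configured broker with
# ';', so split it back into a list and compare order-insensitively below.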
urls = driver._get_connection()._url.split(";")
|
||||
self.assertEqual(sorted(self.expected), sorted(urls))
|
||||
|
||||
|
||||
class TestSendReceive(test_utils.BaseTestCase):
|
||||
|
||||
_n_senders = [
|
||||
('single_sender', dict(n_senders=1)),
|
||||
('multiple_senders', dict(n_senders=10)),
|
||||
]
|
||||
|
||||
_context = [
|
||||
('empty_context', dict(ctxt={})),
|
||||
('with_context', dict(ctxt={'user': 'mark'})),
|
||||
]
|
||||
|
||||
_reply = [
|
||||
('rx_id', dict(rx_id=True, reply=None)),
|
||||
('none', dict(rx_id=False, reply=None)),
|
||||
('empty_list', dict(rx_id=False, reply=[])),
|
||||
('empty_dict', dict(rx_id=False, reply={})),
|
||||
('false', dict(rx_id=False, reply=False)),
|
||||
('zero', dict(rx_id=False, reply=0)),
|
||||
]
|
||||
|
||||
_failure = [
|
||||
('success', dict(failure=False)),
|
||||
('failure', dict(failure=True, expected=False)),
|
||||
('expected_failure', dict(failure=True, expected=True)),
|
||||
]
|
||||
|
||||
_timeout = [
|
||||
('no_timeout', dict(timeout=None)),
|
||||
('timeout', dict(timeout=0.01)), # FIXME(markmc): timeout=0 is broken?
|
||||
]
|
||||
|
||||
@classmethod
|
||||
def generate_scenarios(cls):
|
||||
cls.scenarios = testscenarios.multiply_scenarios(cls._n_senders,
|
||||
cls._context,
|
||||
cls._reply,
|
||||
cls._failure,
|
||||
cls._timeout)
|
||||
|
||||
def test_send_receive(self):
|
||||
transport = oslo_messaging.get_transport(self.conf,
|
||||
'kombu+memory:////')
|
||||
self.addCleanup(transport.cleanup)
|
||||
|
||||
driver = transport._driver
|
||||
|
||||
target = oslo_messaging.Target(topic='testtopic')
|
||||
|
||||
listener = driver.listen(target)
|
||||
|
||||
senders = []
|
||||
replies = []
|
||||
msgs = []
|
||||
errors = []
|
||||
|
||||
def stub_error(msg, *a, **kw):
|
||||
if (a and len(a) == 1 and isinstance(a[0], dict) and a[0]):
|
||||
a = a[0]
|
||||
errors.append(str(msg) % a)
|
||||
|
||||
self.stubs.Set(driver_common.LOG, 'error', stub_error)
|
||||
|
||||
def send_and_wait_for_reply(i):
|
||||
try:
|
||||
replies.append(driver.send(target,
|
||||
self.ctxt,
|
||||
{'tx_id': i},
|
||||
wait_for_reply=True,
|
||||
timeout=self.timeout))
|
||||
self.assertFalse(self.failure)
|
||||
self.assertIsNone(self.timeout)
|
||||
except (ZeroDivisionError, oslo_messaging.MessagingTimeout) as e:
|
||||
replies.append(e)
|
||||
self.assertTrue(self.failure or self.timeout is not None)
|
||||
|
||||
while len(senders) < self.n_senders:
|
||||
senders.append(threading.Thread(target=send_and_wait_for_reply,
|
||||
args=(len(senders), )))
|
||||
|
||||
for i in range(len(senders)):
|
||||
senders[i].start()
|
||||
|
||||
received = listener.poll()
|
||||
self.assertIsNotNone(received)
|
||||
self.assertEqual(self.ctxt, received.ctxt)
|
||||
self.assertEqual({'tx_id': i}, received.message)
|
||||
msgs.append(received)
|
||||
|
||||
# reply in reverse, except reply to the first guy second from last
|
||||
order = list(range(len(senders) - 1, -1, -1))
|
||||
if len(order) > 1:
|
||||
order[-1], order[-2] = order[-2], order[-1]
|
||||
|
||||
for i in order:
|
||||
if self.timeout is None:
|
||||
if self.failure:
|
||||
try:
|
||||
raise ZeroDivisionError
|
||||
except Exception:
|
||||
failure = sys.exc_info()
|
||||
msgs[i].reply(failure=failure,
|
||||
log_failure=not self.expected)
|
||||
elif self.rx_id:
|
||||
msgs[i].reply({'rx_id': i})
|
||||
else:
|
||||
msgs[i].reply(self.reply)
|
||||
senders[i].join()
|
||||
|
||||
self.assertEqual(len(senders), len(replies))
|
||||
for i, reply in enumerate(replies):
|
||||
if self.timeout is not None:
|
||||
self.assertIsInstance(reply, oslo_messaging.MessagingTimeout)
|
||||
elif self.failure:
|
||||
self.assertIsInstance(reply, ZeroDivisionError)
|
||||
elif self.rx_id:
|
||||
self.assertEqual({'rx_id': order[i]}, reply)
|
||||
else:
|
||||
self.assertEqual(self.reply, reply)
|
||||
|
||||
if not self.timeout and self.failure and not self.expected:
|
||||
self.assertTrue(len(errors) > 0, errors)
|
||||
else:
|
||||
self.assertEqual(0, len(errors), errors)
|
||||
|
||||
|
||||
TestSendReceive.generate_scenarios()
|
||||
|
||||
|
||||
class TestPollAsync(test_utils.BaseTestCase):
|
||||
|
||||
def test_poll_timeout(self):
|
||||
transport = oslo_messaging.get_transport(self.conf,
|
||||
'kombu+memory:////')
|
||||
self.addCleanup(transport.cleanup)
|
||||
driver = transport._driver
|
||||
target = oslo_messaging.Target(topic='testtopic')
|
||||
listener = driver.listen(target)
|
||||
received = listener.poll(timeout=0.050)
|
||||
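# poll() returns None rather than raising when the timeout expires
# without a message arriving.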
self.assertIsNone(received)
|
||||
|
||||
|
||||
class TestRacyWaitForReply(test_utils.BaseTestCase):
|
||||
|
||||
def test_send_receive(self):
|
||||
transport = oslo_messaging.get_transport(self.conf,
|
||||
'kombu+memory:////')
|
||||
self.addCleanup(transport.cleanup)
|
||||
|
||||
driver = transport._driver
|
||||
|
||||
target = oslo_messaging.Target(topic='testtopic')
|
||||
|
||||
listener = driver.listen(target)
|
||||
|
||||
senders = []
|
||||
replies = []
|
||||
msgs = []
|
||||
|
||||
wait_conditions = []
|
||||
orig_reply_waiter = amqpdriver.ReplyWaiter.wait
|
||||
|
||||
def reply_waiter(self, msg_id, timeout):
|
||||
if wait_conditions:
|
||||
cond = wait_conditions.pop()
|
||||
with cond:
|
||||
cond.notify()
|
||||
with cond:
|
||||
cond.wait()
|
||||
return orig_reply_waiter(self, msg_id, timeout)
|
||||
|
||||
self.stubs.Set(amqpdriver.ReplyWaiter, 'wait', reply_waiter)
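# The stubbed wait() blocks the first caller on a condition variable
# before it polls for its reply, which is what opens the window for the
# out-of-order reply scenario exercised below.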
|
||||
|
||||
def send_and_wait_for_reply(i, wait_for_reply):
|
||||
replies.append(driver.send(target,
|
||||
{},
|
||||
{'tx_id': i},
|
||||
wait_for_reply=wait_for_reply,
|
||||
timeout=None))
|
||||
|
||||
while len(senders) < 2:
|
||||
t = threading.Thread(target=send_and_wait_for_reply,
|
||||
args=(len(senders), True))
|
||||
t.daemon = True
|
||||
senders.append(t)
|
||||
|
||||
# test the case when msg_id is not set
|
||||
t = threading.Thread(target=send_and_wait_for_reply,
|
||||
args=(len(senders), False))
|
||||
t.daemon = True
|
||||
senders.append(t)
|
||||
|
||||
# Start the first guy, receive his message, but delay his polling
|
||||
notify_condition = threading.Condition()
|
||||
wait_conditions.append(notify_condition)
|
||||
with notify_condition:
|
||||
senders[0].start()
|
||||
notify_condition.wait()
|
||||
|
||||
msgs.append(listener.poll())
|
||||
self.assertEqual({'tx_id': 0}, msgs[-1].message)
|
||||
|
||||
# Start the second guy, receive his message
|
||||
senders[1].start()
|
||||
|
||||
msgs.append(listener.poll())
|
||||
self.assertEqual({'tx_id': 1}, msgs[-1].message)
|
||||
|
||||
# Reply to both in order, making the second thread queue
|
||||
# the reply meant for the first thread
|
||||
msgs[0].reply({'rx_id': 0})
|
||||
msgs[1].reply({'rx_id': 1})
|
||||
|
||||
# Wait for the second thread to finish
|
||||
senders[1].join()
|
||||
|
||||
# Start the 3rd guy, receive his message
|
||||
senders[2].start()
|
||||
|
||||
msgs.append(listener.poll())
|
||||
self.assertEqual({'tx_id': 2}, msgs[-1].message)
|
||||
|
||||
# Verify the _send_reply was not invoked by driver:
|
||||
with mock.patch.object(msgs[2], '_send_reply') as method:
|
||||
msgs[2].reply({'rx_id': 2})
|
||||
self.assertEqual(method.call_count, 0)
|
||||
|
||||
# Wait for the 3rd thread to finish
|
||||
senders[2].join()
|
||||
|
||||
# Let the first thread continue
|
||||
with notify_condition:
|
||||
notify_condition.notify()
|
||||
|
||||
# Wait for the first thread to finish
|
||||
senders[0].join()
|
||||
|
||||
# Verify replies were received out of order
|
||||
self.assertEqual(len(senders), len(replies))
|
||||
self.assertEqual({'rx_id': 1}, replies[0])
|
||||
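# replies[1] comes from the cast-style sender (wait_for_reply=False),
# for which driver.send() returns no reply payload.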
self.assertIsNone(replies[1])
|
||||
self.assertEqual({'rx_id': 0}, replies[2])
|
||||
|
||||
|
||||
def _declare_queue(target):
|
||||
connection = kombu.connection.BrokerConnection(transport='memory')
|
||||
|
||||
# Kludge to speed up tests.
|
||||
connection.transport.polling_interval = 0.0
|
||||
|
||||
connection.connect()
|
||||
channel = connection.channel()
|
||||
|
||||
# work around 'memory' transport bug in 1.1.3
|
||||
channel._new_queue('ae.undeliver')
|
||||
|
||||
if target.fanout:
|
||||
exchange = kombu.entity.Exchange(name=target.topic + '_fanout',
|
||||
type='fanout',
|
||||
durable=False,
|
||||
auto_delete=True)
|
||||
queue = kombu.entity.Queue(name=target.topic + '_fanout_12345',
|
||||
channel=channel,
|
||||
exchange=exchange,
|
||||
routing_key=target.topic)
|
||||
if target.server:
|
||||
exchange = kombu.entity.Exchange(name='openstack',
|
||||
type='topic',
|
||||
durable=False,
|
||||
auto_delete=False)
|
||||
topic = '%s.%s' % (target.topic, target.server)
|
||||
queue = kombu.entity.Queue(name=topic,
|
||||
channel=channel,
|
||||
exchange=exchange,
|
||||
routing_key=topic)
|
||||
else:
|
||||
exchange = kombu.entity.Exchange(name='openstack',
|
||||
type='topic',
|
||||
durable=False,
|
||||
auto_delete=False)
|
||||
queue = kombu.entity.Queue(name=target.topic,
|
||||
channel=channel,
|
||||
exchange=exchange,
|
||||
routing_key=target.topic)
|
||||
|
||||
queue.declare()
|
||||
|
||||
return connection, channel, queue
|
||||
|
||||
|
||||
class TestRequestWireFormat(test_utils.BaseTestCase):
|
||||
|
||||
_target = [
|
||||
('topic_target',
|
||||
dict(topic='testtopic', server=None, fanout=False)),
|
||||
('server_target',
|
||||
dict(topic='testtopic', server='testserver', fanout=False)),
|
||||
# NOTE(markmc): https://github.com/celery/kombu/issues/195
|
||||
('fanout_target',
|
||||
dict(topic='testtopic', server=None, fanout=True,
|
||||
skip_msg='Requires kombu>2.5.12 to fix kombu issue #195')),
|
||||
]
|
||||
|
||||
_msg = [
|
||||
('empty_msg',
|
||||
dict(msg={}, expected={})),
|
||||
('primitive_msg',
|
||||
dict(msg={'foo': 'bar'}, expected={'foo': 'bar'})),
|
||||
('complex_msg',
|
||||
dict(msg={'a': {'b': datetime.datetime(1920, 2, 3, 4, 5, 6, 7)}},
|
||||
expected={'a': {'b': '1920-02-03T04:05:06.000007'}})),
|
||||
]
|
||||
|
||||
_context = [
|
||||
('empty_ctxt', dict(ctxt={}, expected_ctxt={})),
|
||||
('user_project_ctxt',
|
||||
dict(ctxt={'user': 'mark', 'project': 'snarkybunch'},
|
||||
expected_ctxt={'_context_user': 'mark',
|
||||
'_context_project': 'snarkybunch'})),
|
||||
]
|
||||
|
||||
@classmethod
|
||||
def generate_scenarios(cls):
|
||||
cls.scenarios = testscenarios.multiply_scenarios(cls._msg,
|
||||
cls._context,
|
||||
cls._target)
|
||||
|
||||
def setUp(self):
|
||||
super(TestRequestWireFormat, self).setUp()
|
||||
self.uuids = []
|
||||
self.orig_uuid4 = uuid.uuid4
|
||||
self.useFixture(fixtures.MonkeyPatch('uuid.uuid4', self.mock_uuid4))
|
||||
|
||||
def mock_uuid4(self):
|
||||
self.uuids.append(self.orig_uuid4())
|
||||
return self.uuids[-1]
|
||||
|
||||
def test_request_wire_format(self):
|
||||
if hasattr(self, 'skip_msg'):
|
||||
self.skipTest(self.skip_msg)
|
||||
|
||||
transport = oslo_messaging.get_transport(self.conf,
|
||||
'kombu+memory:////')
|
||||
self.addCleanup(transport.cleanup)
|
||||
|
||||
driver = transport._driver
|
||||
|
||||
target = oslo_messaging.Target(topic=self.topic,
|
||||
server=self.server,
|
||||
fanout=self.fanout)
|
||||
|
||||
connection, channel, queue = _declare_queue(target)
|
||||
self.addCleanup(connection.release)
|
||||
|
||||
driver.send(target, self.ctxt, self.msg)
|
||||
|
||||
msgs = []
|
||||
|
||||
def callback(msg):
|
||||
msg = channel.message_to_python(msg)
|
||||
msg.ack()
|
||||
msgs.append(msg.payload)
|
||||
|
||||
queue.consume(callback=callback,
|
||||
consumer_tag='1',
|
||||
nowait=False)
|
||||
|
||||
connection.drain_events()
|
||||
|
||||
self.assertEqual(1, len(msgs))
|
||||
self.assertIn('oslo.message', msgs[0])
|
||||
|
||||
received = msgs[0]
|
||||
received['oslo.message'] = jsonutils.loads(received['oslo.message'])
|
||||
|
||||
# FIXME(markmc): add _msg_id and _reply_q check
|
||||
expected_msg = {
|
||||
'_unique_id': self.uuids[0].hex,
|
||||
}
|
||||
expected_msg.update(self.expected)
|
||||
expected_msg.update(self.expected_ctxt)
|
||||
|
||||
expected = {
|
||||
'oslo.version': '2.0',
|
||||
'oslo.message': expected_msg,
|
||||
}
|
||||
|
||||
self.assertEqual(expected, received)
|
||||
|
||||
|
||||
TestRequestWireFormat.generate_scenarios()
|
||||
|
||||
|
||||
def _create_producer(target):
|
||||
connection = kombu.connection.BrokerConnection(transport='memory')
|
||||
|
||||
# Kludge to speed up tests.
|
||||
connection.transport.polling_interval = 0.0
|
||||
|
||||
connection.connect()
|
||||
channel = connection.channel()
|
||||
|
||||
# work around 'memory' transport bug in 1.1.3
|
||||
channel._new_queue('ae.undeliver')
|
||||
|
||||
if target.fanout:
|
||||
exchange = kombu.entity.Exchange(name=target.topic + '_fanout',
|
||||
type='fanout',
|
||||
durable=False,
|
||||
auto_delete=True)
|
||||
producer = kombu.messaging.Producer(exchange=exchange,
|
||||
channel=channel,
|
||||
routing_key=target.topic)
|
||||
elif target.server:
|
||||
exchange = kombu.entity.Exchange(name='openstack',
|
||||
type='topic',
|
||||
durable=False,
|
||||
auto_delete=False)
|
||||
topic = '%s.%s' % (target.topic, target.server)
|
||||
producer = kombu.messaging.Producer(exchange=exchange,
|
||||
channel=channel,
|
||||
routing_key=topic)
|
||||
else:
|
||||
exchange = kombu.entity.Exchange(name='openstack',
|
||||
type='topic',
|
||||
durable=False,
|
||||
auto_delete=False)
|
||||
producer = kombu.messaging.Producer(exchange=exchange,
|
||||
channel=channel,
|
||||
routing_key=target.topic)
|
||||
|
||||
return connection, producer
|
||||
|
||||
|
||||
class TestReplyWireFormat(test_utils.BaseTestCase):
|
||||
|
||||
_target = [
|
||||
('topic_target',
|
||||
dict(topic='testtopic', server=None, fanout=False)),
|
||||
('server_target',
|
||||
dict(topic='testtopic', server='testserver', fanout=False)),
|
||||
# NOTE(markmc): https://github.com/celery/kombu/issues/195
|
||||
('fanout_target',
|
||||
dict(topic='testtopic', server=None, fanout=True,
|
||||
skip_msg='Requires kombu>2.5.12 to fix kombu issue #195')),
|
||||
]
|
||||
|
||||
_msg = [
|
||||
('empty_msg',
|
||||
dict(msg={}, expected={})),
|
||||
('primitive_msg',
|
||||
dict(msg={'foo': 'bar'}, expected={'foo': 'bar'})),
|
||||
('complex_msg',
|
||||
dict(msg={'a': {'b': '1920-02-03T04:05:06.000007'}},
|
||||
expected={'a': {'b': '1920-02-03T04:05:06.000007'}})),
|
||||
]
|
||||
|
||||
_context = [
|
||||
('empty_ctxt', dict(ctxt={}, expected_ctxt={})),
|
||||
('user_project_ctxt',
|
||||
dict(ctxt={'_context_user': 'mark',
|
||||
'_context_project': 'snarkybunch'},
|
||||
expected_ctxt={'user': 'mark', 'project': 'snarkybunch'})),
|
||||
]
|
||||
|
||||
@classmethod
|
||||
def generate_scenarios(cls):
|
||||
cls.scenarios = testscenarios.multiply_scenarios(cls._msg,
|
||||
cls._context,
|
||||
cls._target)
|
||||
|
||||
def test_reply_wire_format(self):
|
||||
if hasattr(self, 'skip_msg'):
|
||||
self.skipTest(self.skip_msg)
|
||||
|
||||
transport = oslo_messaging.get_transport(self.conf,
|
||||
'kombu+memory:////')
|
||||
self.addCleanup(transport.cleanup)
|
||||
|
||||
driver = transport._driver
|
||||
|
||||
target = oslo_messaging.Target(topic=self.topic,
|
||||
server=self.server,
|
||||
fanout=self.fanout)
|
||||
|
||||
listener = driver.listen(target)
|
||||
|
||||
connection, producer = _create_producer(target)
|
||||
self.addCleanup(connection.release)
|
||||
|
||||
msg = {
|
||||
'oslo.version': '2.0',
|
||||
'oslo.message': {}
|
||||
}
|
||||
|
||||
msg['oslo.message'].update(self.msg)
|
||||
msg['oslo.message'].update(self.ctxt)
|
||||
|
||||
msg['oslo.message'].update({
|
||||
'_msg_id': uuid.uuid4().hex,
|
||||
'_unique_id': uuid.uuid4().hex,
|
||||
'_reply_q': 'reply_' + uuid.uuid4().hex,
|
||||
})
|
||||
|
||||
msg['oslo.message'] = jsonutils.dumps(msg['oslo.message'])
|
||||
|
||||
producer.publish(msg)
|
||||
|
||||
received = listener.poll()
|
||||
self.assertIsNotNone(received)
|
||||
self.assertEqual(self.expected_ctxt, received.ctxt)
|
||||
self.assertEqual(self.expected, received.message)
|
||||
|
||||
|
||||
TestReplyWireFormat.generate_scenarios()
|
||||
|
||||
|
||||
class RpcKombuHATestCase(test_utils.BaseTestCase):

    def setUp(self):
        super(RpcKombuHATestCase, self).setUp()
        self.brokers = ['host1', 'host2', 'host3', 'host4', 'host5']
        self.config(rabbit_hosts=self.brokers,
                    rabbit_retry_interval=0.01,
                    rabbit_retry_backoff=0.01,
                    kombu_reconnect_delay=0)

        self.kombu_connect = mock.Mock()
        self.useFixture(mockpatch.Patch(
            'kombu.connection.Connection.connect',
            side_effect=self.kombu_connect))
        self.useFixture(mockpatch.Patch(
            'kombu.connection.Connection.channel'))

        # starting from the first broker in the list
        url = oslo_messaging.TransportURL.parse(self.conf, None)
        self.connection = rabbit_driver.Connection(self.conf, url)
        self.addCleanup(self.connection.close)

    def test_ensure_four_retry(self):
        mock_callback = mock.Mock(side_effect=IOError)
        self.assertRaises(oslo_messaging.MessageDeliveryFailure,
                          self.connection.ensure, None, mock_callback,
                          retry=4)
        self.assertEqual(5, self.kombu_connect.call_count)
        self.assertEqual(6, mock_callback.call_count)

    def test_ensure_one_retry(self):
        mock_callback = mock.Mock(side_effect=IOError)
        self.assertRaises(oslo_messaging.MessageDeliveryFailure,
                          self.connection.ensure, None, mock_callback,
                          retry=1)
        self.assertEqual(2, self.kombu_connect.call_count)
        self.assertEqual(3, mock_callback.call_count)

    def test_ensure_no_retry(self):
        mock_callback = mock.Mock(side_effect=IOError)
        self.assertRaises(oslo_messaging.MessageDeliveryFailure,
                          self.connection.ensure, None, mock_callback,
                          retry=0)
        self.assertEqual(1, self.kombu_connect.call_count)
        self.assertEqual(2, mock_callback.call_count)

504
oslo_messaging/tests/drivers/test_impl_zmq.py
Normal file
@ -0,0 +1,504 @@
|
||||
# Copyright 2014 Canonical, Ltd.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import logging
|
||||
import socket
|
||||
|
||||
import fixtures
|
||||
import mock
|
||||
import testtools
|
||||
|
||||
from oslo.utils import importutils
|
||||
import oslo_messaging
|
||||
from oslo_messaging.tests import utils as test_utils
|
||||
|
||||
# NOTE(jamespage) the zmq driver implementation is currently tied
|
||||
# to eventlet so we have to monkey_patch to support testing
|
||||
# eventlet is not yet py3 compatible, so skip if not installed
|
||||
eventlet = importutils.try_import('eventlet')
|
||||
if eventlet:
|
||||
eventlet.monkey_patch()
|
||||
|
||||
impl_zmq = importutils.try_import('oslo_messaging._drivers.impl_zmq')
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def get_unused_port():
|
||||
"""Returns an unused port on localhost."""
|
||||
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
|
||||
s.bind(('localhost', 0))
|
||||
port = s.getsockname()[1]
|
||||
s.close()
|
||||
return port
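# NOTE: inherently racy; the port is only known to be free at the moment
# it is probed and may be reused before the caller binds it. Good enough
# for these tests.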
|
||||
|
||||
|
||||
class TestConfZmqDriverLoad(test_utils.BaseTestCase):
|
||||
|
||||
@testtools.skipIf(impl_zmq is None, "zmq not available")
|
||||
def setUp(self):
|
||||
super(TestConfZmqDriverLoad, self).setUp()
|
||||
self.messaging_conf.transport_driver = 'zmq'
|
||||
|
||||
def test_driver_load(self):
|
||||
transport = oslo_messaging.get_transport(self.conf)
|
||||
self.assertIsInstance(transport._driver, impl_zmq.ZmqDriver)
|
||||
|
||||
|
||||
class stopRpc(object):
|
||||
def __init__(self, attrs):
|
||||
self.attrs = attrs
|
||||
|
||||
def __call__(self):
|
||||
if self.attrs['reactor']:
|
||||
self.attrs['reactor'].close()
|
||||
if self.attrs['driver']:
|
||||
self.attrs['driver'].cleanup()
|
||||
|
||||
|
||||
class TestZmqBasics(test_utils.BaseTestCase):
|
||||
|
||||
@testtools.skipIf(impl_zmq is None, "zmq not available")
|
||||
def setUp(self):
|
||||
super(TestZmqBasics, self).setUp()
|
||||
self.messaging_conf.transport_driver = 'zmq'
|
||||
# Get driver
|
||||
transport = oslo_messaging.get_transport(self.conf)
|
||||
self.driver = transport._driver
|
||||
|
||||
# Set config values
|
||||
self.internal_ipc_dir = self.useFixture(fixtures.TempDir()).path
|
||||
kwargs = {'rpc_zmq_bind_address': '127.0.0.1',
|
||||
'rpc_zmq_host': '127.0.0.1',
|
||||
'rpc_response_timeout': 5,
|
||||
'rpc_zmq_port': get_unused_port(),
|
||||
'rpc_zmq_ipc_dir': self.internal_ipc_dir}
|
||||
self.config(**kwargs)
|
||||
|
||||
# Start RPC
|
||||
LOG.info("Running internal zmq receiver.")
|
||||
self.reactor = impl_zmq.ZmqProxy(self.conf)
|
||||
self.reactor.consume_in_thread()
|
||||
|
||||
self.matchmaker = impl_zmq._get_matchmaker(host='127.0.0.1')
|
||||
self.addCleanup(stopRpc(self.__dict__))
|
||||
|
||||
def test_start_stop_listener(self):
|
||||
target = oslo_messaging.Target(topic='testtopic')
|
||||
listener = self.driver.listen(target)
|
||||
result = listener.poll(0.01)
|
||||
self.assertEqual(result, None)
|
||||
|
||||
def test_send_receive_raises(self):
|
||||
"""Call() without method."""
|
||||
target = oslo_messaging.Target(topic='testtopic')
|
||||
self.driver.listen(target)
|
||||
self.assertRaises(
|
||||
KeyError,
|
||||
self.driver.send,
|
||||
target, {}, {'tx_id': 1}, wait_for_reply=True)
|
||||
|
||||
@mock.patch('oslo_messaging._drivers.impl_zmq.ZmqIncomingMessage')
|
||||
def test_send_receive_topic(self, mock_msg):
|
||||
"""Call() with method."""
|
||||
mock_msg.return_value = msg = mock.MagicMock()
|
||||
msg.received = received = mock.MagicMock()
|
||||
received.failure = False
|
||||
received.reply = True
|
||||
msg.condition = condition = mock.MagicMock()
|
||||
condition.wait.return_value = True
|
||||
|
||||
target = oslo_messaging.Target(topic='testtopic')
|
||||
self.driver.listen(target)
|
||||
result = self.driver.send(
|
||||
target, {},
|
||||
{'method': 'hello-world', 'tx_id': 1},
|
||||
wait_for_reply=True)
|
||||
self.assertEqual(result, True)
|
||||
|
||||
@mock.patch('oslo_messaging._drivers.impl_zmq._call', autospec=True)
|
||||
def test_send_receive_fanout(self, mock_call):
|
||||
target = oslo_messaging.Target(topic='testtopic', fanout=True)
|
||||
self.driver.listen(target)
|
||||
|
||||
mock_call.__name__ = '_call'
|
||||
mock_call.return_value = [True]
|
||||
|
||||
result = self.driver.send(
|
||||
target, {},
|
||||
{'method': 'hello-world', 'tx_id': 1},
|
||||
wait_for_reply=True)
|
||||
|
||||
self.assertEqual(result, True)
|
||||
mock_call.assert_called_once_with(
|
||||
'tcp://127.0.0.1:%s' % self.conf['rpc_zmq_port'],
|
||||
{}, 'fanout~testtopic.127.0.0.1',
|
||||
{'tx_id': 1, 'method': 'hello-world'},
|
||||
None, False, [])
|
||||
|
||||
@mock.patch('oslo_messaging._drivers.impl_zmq._call', autospec=True)
|
||||
def test_send_receive_direct(self, mock_call):
|
||||
# Also verifies fix for bug http://pad.lv/1301723
|
||||
target = oslo_messaging.Target(topic='testtopic', server='localhost')
|
||||
self.driver.listen(target)
|
||||
|
||||
mock_call.__name__ = '_call'
|
||||
mock_call.return_value = [True]
|
||||
|
||||
result = self.driver.send(
|
||||
target, {},
|
||||
{'method': 'hello-world', 'tx_id': 1},
|
||||
wait_for_reply=True)
|
||||
|
||||
self.assertEqual(result, True)
|
||||
mock_call.assert_called_once_with(
|
||||
'tcp://localhost:%s' % self.conf['rpc_zmq_port'],
|
||||
{}, 'testtopic.localhost',
|
||||
{'tx_id': 1, 'method': 'hello-world'},
|
||||
None, False, [])
|
||||
|
||||
|
||||
class TestZmqSocket(test_utils.BaseTestCase):
|
||||
|
||||
@testtools.skipIf(impl_zmq is None, "zmq not available")
|
||||
def setUp(self):
|
||||
super(TestZmqSocket, self).setUp()
|
||||
self.messaging_conf.transport_driver = 'zmq'
|
||||
# Get driver
|
||||
transport = oslo_messaging.get_transport(self.conf)
|
||||
self.driver = transport._driver
|
||||
|
||||
@mock.patch('oslo_messaging._drivers.impl_zmq.ZmqSocket.subscribe')
|
||||
@mock.patch('oslo_messaging._drivers.impl_zmq.zmq.Context')
|
||||
def test_zmqsocket_init_type_pull(self, mock_context, mock_subscribe):
|
||||
mock_ctxt = mock.Mock()
|
||||
mock_context.return_value = mock_ctxt
|
||||
mock_sock = mock.Mock()
|
||||
mock_ctxt.socket = mock.Mock(return_value=mock_sock)
|
||||
mock_sock.connect = mock.Mock()
|
||||
mock_sock.bind = mock.Mock()
|
||||
addr = '127.0.0.1'
|
||||
|
||||
sock = impl_zmq.ZmqSocket(addr, impl_zmq.zmq.PULL, bind=False,
|
||||
subscribe=None)
|
||||
self.assertTrue(sock.can_recv)
|
||||
self.assertFalse(sock.can_send)
|
||||
self.assertFalse(sock.can_sub)
|
||||
self.assertTrue(mock_sock.connect.called)
|
||||
self.assertFalse(mock_sock.bind.called)
|
||||
|
||||
@mock.patch('oslo_messaging._drivers.impl_zmq.ZmqSocket.subscribe')
|
||||
@mock.patch('oslo_messaging._drivers.impl_zmq.zmq.Context')
|
||||
def test_zmqsocket_init_type_sub(self, mock_context, mock_subscribe):
|
||||
mock_ctxt = mock.Mock()
|
||||
mock_context.return_value = mock_ctxt
|
||||
mock_sock = mock.Mock()
|
||||
mock_ctxt.socket = mock.Mock(return_value=mock_sock)
|
||||
mock_sock.connect = mock.Mock()
|
||||
mock_sock.bind = mock.Mock()
|
||||
addr = '127.0.0.1'
|
||||
|
||||
sock = impl_zmq.ZmqSocket(addr, impl_zmq.zmq.SUB, bind=False,
|
||||
subscribe=None)
|
||||
self.assertTrue(sock.can_recv)
|
||||
self.assertFalse(sock.can_send)
|
||||
self.assertTrue(sock.can_sub)
|
||||
self.assertTrue(mock_sock.connect.called)
|
||||
self.assertFalse(mock_sock.bind.called)
|
||||
|
||||
@mock.patch('oslo_messaging._drivers.impl_zmq.ZmqSocket.subscribe')
|
||||
@mock.patch('oslo_messaging._drivers.impl_zmq.zmq.Context')
|
||||
def test_zmqsocket_init_type_push(self, mock_context, mock_subscribe):
|
||||
mock_ctxt = mock.Mock()
|
||||
mock_context.return_value = mock_ctxt
|
||||
mock_sock = mock.Mock()
|
||||
mock_ctxt.socket = mock.Mock(return_value=mock_sock)
|
||||
mock_sock.connect = mock.Mock()
|
||||
mock_sock.bind = mock.Mock()
|
||||
addr = '127.0.0.1'
|
||||
|
||||
sock = impl_zmq.ZmqSocket(addr, impl_zmq.zmq.PUSH, bind=False,
|
||||
subscribe=None)
|
||||
self.assertFalse(sock.can_recv)
|
||||
self.assertTrue(sock.can_send)
|
||||
self.assertFalse(sock.can_sub)
|
||||
self.assertTrue(mock_sock.connect.called)
|
||||
self.assertFalse(mock_sock.bind.called)
|
||||
|
||||
@mock.patch('oslo_messaging._drivers.impl_zmq.ZmqSocket.subscribe')
|
||||
@mock.patch('oslo_messaging._drivers.impl_zmq.zmq.Context')
|
||||
def test_zmqsocket_init_type_pub(self, mock_context, mock_subscribe):
|
||||
mock_ctxt = mock.Mock()
|
||||
mock_context.return_value = mock_ctxt
|
||||
mock_sock = mock.Mock()
|
||||
mock_ctxt.socket = mock.Mock(return_value=mock_sock)
|
||||
mock_sock.connect = mock.Mock()
|
||||
mock_sock.bind = mock.Mock()
|
||||
addr = '127.0.0.1'
|
||||
|
||||
sock = impl_zmq.ZmqSocket(addr, impl_zmq.zmq.PUB, bind=False,
|
||||
subscribe=None)
|
||||
self.assertFalse(sock.can_recv)
|
||||
self.assertTrue(sock.can_send)
|
||||
self.assertFalse(sock.can_sub)
|
||||
self.assertTrue(mock_sock.connect.called)
|
||||
self.assertFalse(mock_sock.bind.called)
|
||||
|
||||
|
||||
class TestZmqIncomingMessage(test_utils.BaseTestCase):
|
||||
|
||||
@testtools.skipIf(impl_zmq is None, "zmq not available")
|
||||
def setUp(self):
|
||||
super(TestZmqIncomingMessage, self).setUp()
|
||||
self.messaging_conf.transport_driver = 'zmq'
|
||||
# Get driver
|
||||
transport = oslo_messaging.get_transport(self.conf)
|
||||
self.driver = transport._driver
|
||||
|
||||
def test_zmqincomingmessage(self):
|
||||
msg = impl_zmq.ZmqIncomingMessage(mock.Mock(), None, 'msg.foo')
|
||||
msg.reply("abc")
|
||||
self.assertIsInstance(
|
||||
msg.received, impl_zmq.ZmqIncomingMessage.ReceivedReply)
|
||||
self.assertIsInstance(
|
||||
msg.received, impl_zmq.ZmqIncomingMessage.ReceivedReply)
|
||||
self.assertEqual(msg.received.reply, "abc")
|
||||
msg.requeue()
|
||||
|
||||
|
||||
class TestZmqConnection(test_utils.BaseTestCase):
|
||||
|
||||
@testtools.skipIf(impl_zmq is None, "zmq not available")
|
||||
def setUp(self):
|
||||
super(TestZmqConnection, self).setUp()
|
||||
self.messaging_conf.transport_driver = 'zmq'
|
||||
# Get driver
|
||||
transport = oslo_messaging.get_transport(self.conf)
|
||||
self.driver = transport._driver
|
||||
|
||||
# Set config values
|
||||
self.internal_ipc_dir = self.useFixture(fixtures.TempDir()).path
|
||||
kwargs = {'rpc_zmq_bind_address': '127.0.0.1',
|
||||
'rpc_zmq_host': '127.0.0.1',
|
||||
'rpc_response_timeout': 5,
|
||||
'rpc_zmq_port': get_unused_port(),
|
||||
'rpc_zmq_ipc_dir': self.internal_ipc_dir}
|
||||
self.config(**kwargs)
|
||||
|
||||
# Start RPC
|
||||
LOG.info("Running internal zmq receiver.")
|
||||
self.reactor = impl_zmq.ZmqProxy(self.conf)
|
||||
self.reactor.consume_in_thread()
|
||||
|
||||
self.matchmaker = impl_zmq._get_matchmaker(host='127.0.0.1')
|
||||
self.addCleanup(stopRpc(self.__dict__))
|
||||
|
||||
@mock.patch('oslo_messaging._drivers.impl_zmq.ZmqReactor', autospec=True)
|
||||
def test_zmqconnection_create_consumer(self, mock_reactor):
|
||||
|
||||
mock_reactor.register = mock.Mock()
|
||||
conn = impl_zmq.Connection(self.driver)
|
||||
topic = 'topic.foo'
|
||||
context = mock.Mock()
|
||||
inaddr = ('ipc://%s/zmq_topic_topic.127.0.0.1' %
|
||||
(self.internal_ipc_dir))
|
||||
# No Fanout
|
||||
conn.create_consumer(topic, context)
|
||||
conn.reactor.register.assert_called_with(context, inaddr,
|
||||
impl_zmq.zmq.PULL,
|
||||
subscribe=None, in_bind=False)
|
||||
|
||||
# Reset for next bunch of checks
|
||||
conn.reactor.register.reset_mock()
|
||||
|
||||
# Fanout
|
||||
inaddr = ('ipc://%s/zmq_topic_fanout~topic' %
|
||||
(self.internal_ipc_dir))
|
||||
conn.create_consumer(topic, context, fanout='subscriber.foo')
|
||||
conn.reactor.register.assert_called_with(context, inaddr,
|
||||
impl_zmq.zmq.SUB,
|
||||
subscribe='subscriber.foo',
|
||||
in_bind=False)
|
||||
|
||||
@mock.patch('oslo_messaging._drivers.impl_zmq.ZmqReactor', autospec=True)
|
||||
def test_zmqconnection_create_consumer_topic_exists(self, mock_reactor):
|
||||
mock_reactor.register = mock.Mock()
|
||||
conn = impl_zmq.Connection(self.driver)
|
||||
topic = 'topic.foo'
|
||||
context = mock.Mock()
|
||||
inaddr = ('ipc://%s/zmq_topic_topic.127.0.0.1' %
|
||||
(self.internal_ipc_dir))
|
||||
|
||||
conn.create_consumer(topic, context)
|
||||
conn.reactor.register.assert_called_with(
|
||||
context, inaddr, impl_zmq.zmq.PULL, subscribe=None, in_bind=False)
|
||||
conn.reactor.register.reset_mock()
|
||||
# Call again with same topic
|
||||
conn.create_consumer(topic, context)
|
||||
self.assertFalse(conn.reactor.register.called)
|
||||
|
||||
@mock.patch('oslo_messaging._drivers.impl_zmq._get_matchmaker',
|
||||
autospec=True)
|
||||
@mock.patch('oslo_messaging._drivers.impl_zmq.ZmqReactor', autospec=True)
|
||||
def test_zmqconnection_close(self, mock_reactor, mock_getmatchmaker):
|
||||
conn = impl_zmq.Connection(self.driver)
|
||||
conn.reactor.close = mock.Mock()
|
||||
mock_getmatchmaker.return_value.stop_heartbeat = mock.Mock()
|
||||
conn.close()
|
||||
self.assertTrue(mock_getmatchmaker.return_value.stop_heartbeat.called)
|
||||
self.assertTrue(conn.reactor.close.called)
|
||||
|
||||
@mock.patch('oslo_messaging._drivers.impl_zmq.ZmqReactor', autospec=True)
|
||||
def test_zmqconnection_wait(self, mock_reactor):
|
||||
conn = impl_zmq.Connection(self.driver)
|
||||
conn.reactor.wait = mock.Mock()
|
||||
conn.wait()
|
||||
self.assertTrue(conn.reactor.wait.called)
|
||||
|
||||
@mock.patch('oslo_messaging._drivers.impl_zmq._get_matchmaker',
|
||||
autospec=True)
|
||||
@mock.patch('oslo_messaging._drivers.impl_zmq.ZmqReactor', autospec=True)
|
||||
def test_zmqconnection_consume_in_thread(self, mock_reactor,
|
||||
mock_getmatchmaker):
|
||||
mock_getmatchmaker.return_value.start_heartbeat = mock.Mock()
|
||||
conn = impl_zmq.Connection(self.driver)
|
||||
conn.reactor.consume_in_thread = mock.Mock()
|
||||
conn.consume_in_thread()
|
||||
self.assertTrue(mock_getmatchmaker.return_value.start_heartbeat.called)
|
||||
self.assertTrue(conn.reactor.consume_in_thread.called)
|
||||
|
||||
|
||||
class TestZmqListener(test_utils.BaseTestCase):
|
||||
|
||||
@testtools.skipIf(impl_zmq is None, "zmq not available")
|
||||
def setUp(self):
|
||||
super(TestZmqListener, self).setUp()
|
||||
self.messaging_conf.transport_driver = 'zmq'
|
||||
# Get driver
|
||||
transport = oslo_messaging.get_transport(self.conf)
|
||||
self.driver = transport._driver
|
||||
|
||||
# Set config values
|
||||
self.internal_ipc_dir = self.useFixture(fixtures.TempDir()).path
|
||||
kwargs = {'rpc_zmq_bind_address': '127.0.0.1',
|
||||
'rpc_zmq_host': '127.0.0.1',
|
||||
'rpc_response_timeout': 5,
|
||||
'rpc_zmq_port': get_unused_port(),
|
||||
'rpc_zmq_ipc_dir': self.internal_ipc_dir}
|
||||
self.config(**kwargs)
|
||||
|
||||
# Start RPC
|
||||
LOG.info("Running internal zmq receiver.")
|
||||
self.reactor = impl_zmq.ZmqProxy(self.conf)
|
||||
self.reactor.consume_in_thread()
|
||||
|
||||
self.matchmaker = impl_zmq._get_matchmaker(host='127.0.0.1')
|
||||
self.addCleanup(stopRpc(self.__dict__))
|
||||
|
||||
def test_zmqlistener_no_msg(self):
|
||||
listener = impl_zmq.ZmqListener(self.driver)
|
||||
# Timeout = 0 should return straight away since the queue is empty
|
||||
listener.poll(timeout=0)
|
||||
|
||||
def test_zmqlistener_w_msg(self):
|
||||
listener = impl_zmq.ZmqListener(self.driver)
|
||||
kwargs = {'a': 1, 'b': 2}
|
||||
m = mock.Mock()
|
||||
ctxt = mock.Mock(autospec=impl_zmq.RpcContext)
|
||||
eventlet.spawn_n(listener.dispatch, ctxt, 0,
|
||||
m.fake_method, 'name.space', **kwargs)
|
||||
resp = listener.poll(timeout=10)
|
||||
msg = {'method': m.fake_method, 'namespace': 'name.space',
|
||||
'args': kwargs}
|
||||
self.assertEqual(resp.message, msg)
|
||||
|
||||
|
||||
class TestZmqDriver(test_utils.BaseTestCase):
|
||||
|
||||
@testtools.skipIf(impl_zmq is None, "zmq not available")
|
||||
def setUp(self):
|
||||
super(TestZmqDriver, self).setUp()
|
||||
self.messaging_conf.transport_driver = 'zmq'
|
||||
# Get driver
|
||||
transport = oslo_messaging.get_transport(self.conf)
|
||||
self.driver = transport._driver
|
||||
|
||||
# Set config values
|
||||
self.internal_ipc_dir = self.useFixture(fixtures.TempDir()).path
|
||||
kwargs = {'rpc_zmq_bind_address': '127.0.0.1',
|
||||
'rpc_zmq_host': '127.0.0.1',
|
||||
'rpc_response_timeout': 5,
|
||||
'rpc_zmq_port': get_unused_port(),
|
||||
'rpc_zmq_ipc_dir': self.internal_ipc_dir}
|
||||
self.config(**kwargs)
|
||||
|
||||
# Start RPC
|
||||
LOG.info("Running internal zmq receiver.")
|
||||
self.reactor = impl_zmq.ZmqProxy(self.conf)
|
||||
self.reactor.consume_in_thread()
|
||||
|
||||
self.matchmaker = impl_zmq._get_matchmaker(host='127.0.0.1')
|
||||
self.addCleanup(stopRpc(self.__dict__))
|
||||
|
||||
@mock.patch('oslo_messaging._drivers.impl_zmq._cast', autospec=True)
|
||||
@mock.patch('oslo_messaging._drivers.impl_zmq._multi_send', autospec=True)
|
||||
def test_zmqdriver_send(self, mock_multi_send, mock_cast):
|
||||
context = mock.Mock(autospec=impl_zmq.RpcContext)
|
||||
topic = 'testtopic'
|
||||
msg = 'jeronimo'
|
||||
self.driver.send(oslo_messaging.Target(topic=topic), context, msg,
|
||||
False, 0, False)
|
||||
mock_multi_send.assert_called_with(mock_cast, context, topic, msg,
|
||||
allowed_remote_exmods=[],
|
||||
envelope=False)
|
||||
|
||||
@mock.patch('oslo_messaging._drivers.impl_zmq._cast', autospec=True)
|
||||
@mock.patch('oslo_messaging._drivers.impl_zmq._multi_send', autospec=True)
|
||||
def test_zmqdriver_send_notification(self, mock_multi_send, mock_cast):
|
||||
context = mock.Mock(autospec=impl_zmq.RpcContext)
|
||||
topic = 'testtopic.foo'
|
||||
topic_reformat = 'testtopic-foo'
|
||||
msg = 'jeronimo'
|
||||
self.driver.send_notification(oslo_messaging.Target(topic=topic),
|
||||
context, msg, False, False)
|
||||
mock_multi_send.assert_called_with(mock_cast, context, topic_reformat,
|
||||
msg, allowed_remote_exmods=[],
|
||||
envelope=False)
|
||||
|
||||
@mock.patch('oslo_messaging._drivers.impl_zmq.ZmqListener', autospec=True)
|
||||
@mock.patch('oslo_messaging._drivers.impl_zmq.Connection', autospec=True)
|
||||
def test_zmqdriver_listen(self, mock_connection, mock_listener):
|
||||
mock_listener.return_value = listener = mock.Mock()
|
||||
mock_connection.return_value = conn = mock.Mock()
|
||||
conn.create_consumer = mock.Mock()
|
||||
conn.consume_in_thread = mock.Mock()
|
||||
topic = 'testtopic.foo'
|
||||
self.driver.listen(oslo_messaging.Target(topic=topic))
|
||||
conn.create_consumer.assert_called_with(topic, listener, fanout=True)
|
||||
|
||||
@mock.patch('oslo_messaging._drivers.impl_zmq.ZmqListener', autospec=True)
|
||||
@mock.patch('oslo_messaging._drivers.impl_zmq.Connection', autospec=True)
|
||||
def test_zmqdriver_listen_for_notification(self, mock_connection,
|
||||
mock_listener):
|
||||
mock_listener.return_value = listener = mock.Mock()
|
||||
mock_connection.return_value = conn = mock.Mock()
|
||||
conn.create_consumer = mock.Mock()
|
||||
conn.consume_in_thread = mock.Mock()
|
||||
topic = 'testtopic.foo'
|
||||
data = [(oslo_messaging.Target(topic=topic), 0)]
|
||||
# NOTE(jamespage): Pooling not supported, just pass None for now.
|
||||
self.driver.listen_for_notifications(data, None)
|
||||
conn.create_consumer.assert_called_with("%s-%s" % (topic, 0), listener)
|
69
oslo_messaging/tests/drivers/test_matchmaker.py
Normal file
@ -0,0 +1,69 @@
|
||||
# Copyright 2014 Canonical, Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import testtools
|
||||
|
||||
from oslo.utils import importutils
|
||||
from oslo_messaging.tests import utils as test_utils
|
||||
|
||||
# NOTE(jamespage) matchmaker tied directly to eventlet
|
||||
# which is not yet py3 compatible - skip if import fails
|
||||
matchmaker = (
|
||||
importutils.try_import('oslo_messaging._drivers.matchmaker'))
|
||||
|
||||
|
||||
@testtools.skipIf(not matchmaker, "matchmaker/eventlet unavailable")
|
||||
class MatchmakerTest(test_utils.BaseTestCase):
|
||||
|
||||
def test_fanout_binding(self):
|
||||
matcher = matchmaker.MatchMakerBase()
|
||||
matcher.add_binding(
|
||||
matchmaker.FanoutBinding(), matchmaker.DirectExchange())
|
||||
self.assertEqual(matcher.queues('hello.world'), [])
|
||||
self.assertEqual(
|
||||
matcher.queues('fanout~fantasy.unicorn'),
|
||||
[('fanout~fantasy.unicorn', 'unicorn')])
|
||||
self.assertEqual(
|
||||
matcher.queues('fanout~fantasy.pony'),
|
||||
[('fanout~fantasy.pony', 'pony')])
|
||||
|
||||
def test_topic_binding(self):
|
||||
matcher = matchmaker.MatchMakerBase()
|
||||
matcher.add_binding(
|
||||
matchmaker.TopicBinding(), matchmaker.StubExchange())
|
||||
self.assertEqual(
|
||||
matcher.queues('hello-world'), [('hello-world', None)])
|
||||
|
||||
def test_direct_binding(self):
|
||||
matcher = matchmaker.MatchMakerBase()
|
||||
matcher.add_binding(
|
||||
matchmaker.DirectBinding(), matchmaker.StubExchange())
|
||||
self.assertEqual(
|
||||
matcher.queues('hello.server'), [('hello.server', None)])
|
||||
self.assertEqual(matcher.queues('hello-world'), [])
|
||||
|
||||
def test_localhost_match(self):
|
||||
matcher = matchmaker.MatchMakerLocalhost()
|
||||
self.assertEqual(
|
||||
matcher.queues('hello.server'), [('hello.server', 'server')])
|
||||
|
||||
# Gets remapped due to localhost exchange
|
||||
# all bindings default to first match.
|
||||
self.assertEqual(
|
||||
matcher.queues('fanout~testing.server'),
|
||||
[('fanout~testing.localhost', 'localhost')])
|
||||
|
||||
self.assertEqual(
|
||||
matcher.queues('hello-world'),
|
||||
[('hello-world.localhost', 'localhost')])
|
78
oslo_messaging/tests/drivers/test_matchmaker_redis.py
Normal file
@ -0,0 +1,78 @@
|
||||
# Copyright 2014 Canonical, Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import testtools
|
||||
|
||||
from oslo.utils import importutils
|
||||
from oslo_messaging.tests import utils as test_utils
|
||||
|
||||
redis = importutils.try_import('redis')
|
||||
matchmaker_redis = (
|
||||
importutils.try_import('oslo_messaging._drivers.matchmaker_redis'))
|
||||
|
||||
|
||||
def redis_available():
|
||||
'''Helper to see if local redis server is running'''
|
||||
if not redis:
|
||||
return False
|
||||
try:
|
||||
c = redis.StrictRedis(socket_timeout=1)
|
||||
c.ping()
|
||||
return True
|
||||
except redis.exceptions.ConnectionError:
|
||||
return False
|
||||
|
||||
|
||||
@testtools.skipIf(not matchmaker_redis, "matchmaker/eventlet unavailable")
|
||||
@testtools.skipIf(not redis_available(), "redis unavailable")
|
||||
class RedisMatchMakerTest(test_utils.BaseTestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(RedisMatchMakerTest, self).setUp()
|
||||
self.ring_data = {
|
||||
"conductor": ["controller1", "node1", "node2", "node3"],
|
||||
"scheduler": ["controller1", "node1", "node2", "node3"],
|
||||
"network": ["controller1", "node1", "node2", "node3"],
|
||||
"cert": ["controller1"],
|
||||
"console": ["controller1"],
|
||||
"consoleauth": ["controller1"]}
|
||||
self.matcher = matchmaker_redis.MatchMakerRedis()
|
||||
self.populate()
|
||||
|
||||
def tearDown(self):
|
||||
super(RedisMatchMakerTest, self).tearDown()
|
||||
c = redis.StrictRedis()
|
||||
c.flushdb()
|
||||
|
||||
def populate(self):
|
||||
for k, hosts in self.ring_data.items():
|
||||
for h in hosts:
|
||||
self.matcher.register(k, h)
|
||||
|
||||
def test_direct(self):
|
||||
self.assertEqual(
|
||||
self.matcher.queues('cert.controller1'),
|
||||
[('cert.controller1', 'controller1')])
|
||||
|
||||
def test_register(self):
|
||||
self.matcher.register('cert', 'keymaster')
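# register() adds a '<topic>.<host>' member to the Redis set named after
# the topic, which the smembers() assertion below inspects.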
|
||||
self.assertEqual(
|
||||
sorted(self.matcher.redis.smembers('cert')),
|
||||
['cert.controller1', 'cert.keymaster'])
|
||||
|
||||
def test_unregister(self):
|
||||
self.matcher.unregister('conductor', 'controller1')
|
||||
self.assertEqual(
|
||||
sorted(self.matcher.redis.smembers('conductor')),
|
||||
['conductor.node1', 'conductor.node2', 'conductor.node3'])
|
73
oslo_messaging/tests/drivers/test_matchmaker_ring.py
Normal file
@ -0,0 +1,73 @@
|
||||
# Copyright 2014 Canonical, Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import testtools
|
||||
|
||||
from oslo.utils import importutils
|
||||
from oslo_messaging.tests import utils as test_utils
|
||||
|
||||
# NOTE(jamespage) matchmaker tied directly to eventlet
|
||||
# which is not yet py3 compatible - skip if import fails
|
||||
matchmaker_ring = (
|
||||
importutils.try_import('oslo_messaging._drivers.matchmaker_ring'))
|
||||
|
||||
|
||||
@testtools.skipIf(not matchmaker_ring, "matchmaker/eventlet unavailable")
|
||||
class MatchmakerRingTest(test_utils.BaseTestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(MatchmakerRingTest, self).setUp()
|
||||
self.ring_data = {
|
||||
"conductor": ["controller1", "node1", "node2", "node3"],
|
||||
"scheduler": ["controller1", "node1", "node2", "node3"],
|
||||
"network": ["controller1", "node1", "node2", "node3"],
|
||||
"cert": ["controller1"],
|
||||
"console": ["controller1"],
|
||||
"consoleauth": ["controller1"]}
|
||||
self.matcher = matchmaker_ring.MatchMakerRing(self.ring_data)
|
||||
|
||||
def test_direct(self):
|
||||
self.assertEqual(
|
||||
self.matcher.queues('cert.controller1'),
|
||||
[('cert.controller1', 'controller1')])
|
||||
self.assertEqual(
|
||||
self.matcher.queues('conductor.node1'),
|
||||
[('conductor.node1', 'node1')])
|
||||
|
||||
def test_fanout(self):
|
||||
self.assertEqual(
|
||||
self.matcher.queues('fanout~conductor'),
|
||||
[('fanout~conductor.controller1', 'controller1'),
|
||||
('fanout~conductor.node1', 'node1'),
|
||||
('fanout~conductor.node2', 'node2'),
|
||||
('fanout~conductor.node3', 'node3')])
|
||||
|
||||
def test_bare_topic(self):
|
||||
# Round robins through the hosts on the topic
|
||||
self.assertEqual(
|
||||
self.matcher.queues('scheduler'),
|
||||
[('scheduler.controller1', 'controller1')])
|
||||
self.assertEqual(
|
||||
self.matcher.queues('scheduler'),
|
||||
[('scheduler.node1', 'node1')])
|
||||
self.assertEqual(
|
||||
self.matcher.queues('scheduler'),
|
||||
[('scheduler.node2', 'node2')])
|
||||
self.assertEqual(
|
||||
self.matcher.queues('scheduler'),
|
||||
[('scheduler.node3', 'node3')])
|
||||
# Cycles loop
|
||||
self.assertEqual(
|
||||
self.matcher.queues('scheduler'),
|
||||
[('scheduler.controller1', 'controller1')])
|
124
oslo_messaging/tests/drivers/test_pool.py
Normal file
@ -0,0 +1,124 @@
|
||||
|
||||
# Copyright 2013 Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import threading
|
||||
import uuid
|
||||
|
||||
import testscenarios
|
||||
|
||||
from oslo_messaging._drivers import pool
|
||||
from oslo_messaging.tests import utils as test_utils
|
||||
|
||||
load_tests = testscenarios.load_tests_apply_scenarios
|
||||
|
||||
|
||||
class PoolTestCase(test_utils.BaseTestCase):
|
||||
|
||||
_max_size = [
|
||||
('default_size', dict(max_size=None, n_iters=4)),
|
||||
('set_max_size', dict(max_size=10, n_iters=10)),
|
||||
]
|
||||
|
||||
_create_error = [
|
||||
('no_create_error', dict(create_error=False)),
|
||||
('create_error', dict(create_error=True)),
|
||||
]
|
||||
|
||||
@classmethod
|
||||
def generate_scenarios(cls):
|
||||
cls.scenarios = testscenarios.multiply_scenarios(cls._max_size,
|
||||
cls._create_error)
|
||||
|
||||
class TestPool(pool.Pool):
|
||||
|
||||
def create(self):
|
||||
return uuid.uuid4()
|
||||
|
||||
class ThreadWaitWaiter(object):
|
||||
|
||||
"""A gross hack.
|
||||
|
||||
Stub out the condition variable's wait() method and spin until it
|
||||
has been called by each thread.
|
||||
"""
|
||||
|
||||
def __init__(self, cond, n_threads, stubs):
|
||||
self.cond = cond
|
||||
self.stubs = stubs
|
||||
self.n_threads = n_threads
|
||||
self.n_waits = 0
|
||||
self.orig_wait = cond.wait
|
||||
|
||||
def count_waits(**kwargs):
|
||||
self.n_waits += 1
|
||||
self.orig_wait(**kwargs)
|
||||
self.stubs.Set(self.cond, 'wait', count_waits)
|
||||
|
||||
def wait(self):
|
||||
while self.n_waits < self.n_threads:
|
||||
pass
|
||||
self.stubs.Set(self.cond, 'wait', self.orig_wait)
|
||||
|
||||
def test_pool(self):
|
||||
kwargs = {}
|
||||
if self.max_size is not None:
|
||||
kwargs['max_size'] = self.max_size
|
||||
|
||||
p = self.TestPool(**kwargs)
|
||||
|
||||
if self.create_error:
|
||||
def create_error():
|
||||
raise RuntimeError
|
||||
orig_create = p.create
|
||||
self.stubs.Set(p, 'create', create_error)
|
||||
self.assertRaises(RuntimeError, p.get)
|
||||
self.stubs.Set(p, 'create', orig_create)
|
||||
|
||||
objs = []
|
||||
for i in range(self.n_iters):
|
||||
objs.append(p.get())
|
||||
self.assertIsInstance(objs[i], uuid.UUID)
|
||||
|
||||
def wait_for_obj():
|
||||
o = p.get()
|
||||
self.assertIn(o, objs)
|
||||
|
||||
waiter = self.ThreadWaitWaiter(p._cond, self.n_iters, self.stubs)
|
||||
|
||||
threads = []
|
||||
for i in range(self.n_iters):
|
||||
t = threading.Thread(target=wait_for_obj)
|
||||
t.start()
|
||||
threads.append(t)
|
||||
|
||||
waiter.wait()
|
||||
|
||||
for o in objs:
|
||||
p.put(o)
|
||||
|
||||
for t in threads:
|
||||
t.join()
|
||||
|
||||
for o in objs:
|
||||
p.put(o)
|
||||
|
||||
for o in p.iter_free():
|
||||
self.assertIn(o, objs)
|
||||
objs.remove(o)
|
||||
|
||||
self.assertEqual([], objs)
|
||||
|
||||
|
||||
PoolTestCase.generate_scenarios()
|
0
oslo_messaging/tests/executors/__init__.py
Normal file
@ -25,13 +25,13 @@ import mock
|
||||
import testscenarios
|
||||
import testtools
|
||||
|
||||
from oslo.messaging._executors import impl_blocking
|
||||
from oslo_messaging._executors import impl_blocking
|
||||
try:
|
||||
from oslo.messaging._executors import impl_eventlet
|
||||
from oslo_messaging._executors import impl_eventlet
|
||||
except ImportError:
|
||||
impl_eventlet = None
|
||||
from oslo.messaging._executors import impl_thread
|
||||
from tests import utils as test_utils
|
||||
from oslo_messaging._executors import impl_thread
|
||||
from oslo_messaging.tests import utils as test_utils
|
||||
|
||||
load_tests = testscenarios.load_tests_apply_scenarios
|
||||
|
0
oslo_messaging/tests/functional/__init__.py
Normal file
284
oslo_messaging/tests/functional/test_functional.py
Normal file
@ -0,0 +1,284 @@
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import oslo_messaging

from testtools import matchers

from oslo_messaging.tests.functional import utils


class CallTestCase(utils.SkipIfNoTransportURL):
    def test_specific_server(self):
        group = self.useFixture(utils.RpcServerGroupFixture(self.url))
        client = group.client(1)
        client.append(text='open')
        self.assertEqual('openstack', client.append(text='stack'))
        client.add(increment=2)
        self.assertEqual(12, client.add(increment=10))
        self.assertEqual(9, client.subtract(increment=3))
        self.assertEqual('openstack', group.servers[1].endpoint.sval)
        self.assertEqual(9, group.servers[1].endpoint.ival)
        for i in [0, 2]:
            self.assertEqual('', group.servers[i].endpoint.sval)
            self.assertEqual(0, group.servers[i].endpoint.ival)

    def test_server_in_group(self):
        group = self.useFixture(utils.RpcServerGroupFixture(self.url))

        client = group.client()
        data = [c for c in 'abcdefghijklmn']
        for i in data:
            client.append(text=i)

        for s in group.servers:
            self.assertThat(len(s.endpoint.sval), matchers.GreaterThan(0))
        actual = [[c for c in s.endpoint.sval] for s in group.servers]
        self.assertThat(actual, utils.IsValidDistributionOf(data))

    def test_different_exchanges(self):
        t = self.useFixture(utils.TransportFixture(self.url))
        # If the different exchanges are not honoured, then the
        # teardown may hang unless we broadcast all control messages
        # to each server
        group1 = self.useFixture(
            utils.RpcServerGroupFixture(self.url, transport=t,
                                        use_fanout_ctrl=True))
        group2 = self.useFixture(
            utils.RpcServerGroupFixture(self.url, exchange="a",
                                        transport=t,
                                        use_fanout_ctrl=True))
        group3 = self.useFixture(
            utils.RpcServerGroupFixture(self.url, exchange="b",
                                        transport=t,
                                        use_fanout_ctrl=True))

        client1 = group1.client(1)
        data1 = [c for c in 'abcdefghijklmn']
        for i in data1:
            client1.append(text=i)

        client2 = group2.client()
        data2 = [c for c in 'opqrstuvwxyz']
        for i in data2:
            client2.append(text=i)

        actual1 = [[c for c in s.endpoint.sval] for s in group1.servers]
        self.assertThat(actual1, utils.IsValidDistributionOf(data1))
        actual1 = [c for c in group1.servers[1].endpoint.sval]
        self.assertThat([actual1], utils.IsValidDistributionOf(data1))
        for s in group1.servers:
            expected = len(data1) if group1.servers.index(s) == 1 else 0
            self.assertEqual(expected, len(s.endpoint.sval))
            self.assertEqual(0, s.endpoint.ival)

        actual2 = [[c for c in s.endpoint.sval] for s in group2.servers]
        for s in group2.servers:
            self.assertThat(len(s.endpoint.sval), matchers.GreaterThan(0))
            self.assertEqual(0, s.endpoint.ival)
        self.assertThat(actual2, utils.IsValidDistributionOf(data2))

        for s in group3.servers:
            self.assertEqual(0, len(s.endpoint.sval))
            self.assertEqual(0, s.endpoint.ival)

    def test_timeout(self):
        transport = self.useFixture(utils.TransportFixture(self.url))
        target = oslo_messaging.Target(topic="no_such_topic")
        c = utils.ClientStub(transport.transport, target, timeout=1)
        self.assertThat(c.ping,
                        matchers.raises(oslo_messaging.MessagingTimeout))

    def test_exception(self):
        group = self.useFixture(utils.RpcServerGroupFixture(self.url))
        client = group.client(1)
        client.add(increment=2)
        f = lambda: client.subtract(increment=3)
        self.assertThat(f, matchers.raises(ValueError))


class CastTestCase(utils.SkipIfNoTransportURL):
    # Note: casts return immediately, so these tests utilise a special
    # internal sync() cast to ensure prior casts are complete before
    # making the necessary assertions.

    def test_specific_server(self):
        group = self.useFixture(utils.RpcServerGroupFixture(self.url))
        client = group.client(1, cast=True)
        client.append(text='open')
        client.append(text='stack')
        client.add(increment=2)
        client.add(increment=10)
        group.sync()

        self.assertEqual('openstack', group.servers[1].endpoint.sval)
        self.assertEqual(12, group.servers[1].endpoint.ival)
        for i in [0, 2]:
            self.assertEqual('', group.servers[i].endpoint.sval)
            self.assertEqual(0, group.servers[i].endpoint.ival)

    def test_server_in_group(self):
        group = self.useFixture(utils.RpcServerGroupFixture(self.url))
        client = group.client(cast=True)
        for i in range(20):
            client.add(increment=1)
        group.sync()
        total = 0
        for s in group.servers:
            ival = s.endpoint.ival
            self.assertThat(ival, matchers.GreaterThan(0))
            self.assertThat(ival, matchers.LessThan(20))
            total += ival
        self.assertEqual(20, total)

    def test_fanout(self):
        group = self.useFixture(utils.RpcServerGroupFixture(self.url))
        client = group.client('all', cast=True)
        client.append(text='open')
        client.append(text='stack')
        client.add(increment=2)
        client.add(increment=10)
        group.sync(server='all')
        for s in group.servers:
            self.assertEqual('openstack', s.endpoint.sval)
            self.assertEqual(12, s.endpoint.ival)


class NotifyTestCase(utils.SkipIfNoTransportURL):
    # NOTE(sileht): Each test must not use the same topics
    # to be run in parallel

    def test_simple(self):
        transport = self.useFixture(utils.TransportFixture(self.url))
        listener = self.useFixture(
            utils.NotificationFixture(transport.transport,
                                      ['test_simple']))
        transport.wait()
        notifier = listener.notifier('abc')

        notifier.info({}, 'test', 'Hello World!')
        event = listener.events.get(timeout=1)
        self.assertEqual('info', event[0])
        self.assertEqual('test', event[1])
        self.assertEqual('Hello World!', event[2])
        self.assertEqual('abc', event[3])

    def test_multiple_topics(self):
        transport = self.useFixture(utils.TransportFixture(self.url))
        listener = self.useFixture(
            utils.NotificationFixture(transport.transport,
                                      ['a', 'b']))
        transport.wait()
        a = listener.notifier('pub-a', topic='a')
        b = listener.notifier('pub-b', topic='b')

        sent = {
            'pub-a': [a, 'test-a', 'payload-a'],
            'pub-b': [b, 'test-b', 'payload-b']
        }
        for e in sent.values():
            e[0].info({}, e[1], e[2])

        received = {}
        while len(received) < len(sent):
            e = listener.events.get(timeout=1)
            received[e[3]] = e

        for key in received:
            actual = received[key]
            expected = sent[key]
            self.assertEqual('info', actual[0])
            self.assertEqual(expected[1], actual[1])
            self.assertEqual(expected[2], actual[2])

    def test_multiple_servers(self):
        transport = self.useFixture(utils.TransportFixture(self.url))
        listener_a = self.useFixture(
            utils.NotificationFixture(transport.transport,
                                      ['test-topic']))
        listener_b = self.useFixture(
            utils.NotificationFixture(transport.transport,
                                      ['test-topic']))
        transport.wait()
        n = listener_a.notifier('pub')

        events_out = [('test-%s' % c, 'payload-%s' % c) for c in 'abcdefgh']

        for event_type, payload in events_out:
            n.info({}, event_type, payload)

        events_in = [[(e[1], e[2]) for e in listener_a.get_events()],
                     [(e[1], e[2]) for e in listener_b.get_events()]]

        self.assertThat(events_in, utils.IsValidDistributionOf(events_out))
        for stream in events_in:
            self.assertThat(len(stream), matchers.GreaterThan(0))

    def test_independent_topics(self):
        transport = self.useFixture(utils.TransportFixture(self.url))
        listener_a = self.useFixture(
            utils.NotificationFixture(transport.transport,
                                      ['1']))
        listener_b = self.useFixture(
            utils.NotificationFixture(transport.transport,
                                      ['2']))
        transport.wait()

        a = listener_a.notifier('pub-1', topic='1')
        b = listener_b.notifier('pub-2', topic='2')

        a_out = [('test-1-%s' % c, 'payload-1-%s' % c) for c in 'abcdefgh']
        for event_type, payload in a_out:
            a.info({}, event_type, payload)

        b_out = [('test-2-%s' % c, 'payload-2-%s' % c) for c in 'ijklmnop']
        for event_type, payload in b_out:
            b.info({}, event_type, payload)

        for expected in a_out:
            actual = listener_a.events.get(timeout=0.5)
            self.assertEqual('info', actual[0])
            self.assertEqual(expected[0], actual[1])
            self.assertEqual(expected[1], actual[2])
            self.assertEqual('pub-1', actual[3])

        for expected in b_out:
            actual = listener_b.events.get(timeout=0.5)
            self.assertEqual('info', actual[0])
            self.assertEqual(expected[0], actual[1])
            self.assertEqual(expected[1], actual[2])
            self.assertEqual('pub-2', actual[3])

    def test_all_categories(self):
        transport = self.useFixture(utils.TransportFixture(self.url))
        listener = self.useFixture(utils.NotificationFixture(
            transport.transport, ['test_all_categories']))
        transport.wait()
        n = listener.notifier('abc')

        cats = ['debug', 'audit', 'info', 'warn', 'error', 'critical']
        events = [(getattr(n, c), c, 'type-' + c, c + '-data') for c in cats]
        for e in events:
            e[0]({}, e[2], e[3])

        # order between events with different categories is not guaranteed
        received = {}
        for expected in events:
            e = listener.events.get(timeout=0.5)
            received[e[0]] = e

        for expected in events:
            actual = received[expected[1]]
            self.assertEqual(expected[1], actual[0])
            self.assertEqual(expected[2], actual[1])
            self.assertEqual(expected[3], actual[2])
344
oslo_messaging/tests/functional/utils.py
Normal file
@ -0,0 +1,344 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import threading
import time
import uuid

import fixtures
from six import moves

from oslo.config import cfg
import oslo_messaging
from oslo_messaging.notify import notifier
from oslo_messaging.tests import utils as test_utils


class TestServerEndpoint(object):
    """This MessagingServer endpoint is used during functional testing."""

    def __init__(self):
        self.ival = 0
        self.sval = ''

    def add(self, ctxt, increment):
        self.ival += increment
        return self.ival

    def subtract(self, ctxt, increment):
        if self.ival < increment:
            raise ValueError("ival can't go negative!")
        self.ival -= increment
        return self.ival

    def append(self, ctxt, text):
        self.sval += text
        return self.sval


class TransportFixture(fixtures.Fixture):
    """Fixture defined to set up the oslo_messaging transport."""

    def __init__(self, url):
        self.url = url

    def setUp(self):
        super(TransportFixture, self).setUp()
        self.transport = oslo_messaging.get_transport(cfg.CONF, url=self.url)

    def cleanUp(self):
        self.transport.cleanup()
        super(TransportFixture, self).cleanUp()

    def wait(self):
        if self.url.startswith("rabbit") or self.url.startswith("qpid"):
            time.sleep(0.5)


class RpcServerFixture(fixtures.Fixture):
    """Fixture to set up the TestServerEndpoint."""

    def __init__(self, transport, target, endpoint=None, ctrl_target=None):
        super(RpcServerFixture, self).__init__()
        self.transport = transport
        self.target = target
        self.endpoint = endpoint or TestServerEndpoint()
        self.syncq = moves.queue.Queue()
        self.ctrl_target = ctrl_target or self.target

    def setUp(self):
        super(RpcServerFixture, self).setUp()
        endpoints = [self.endpoint, self]
        self.server = oslo_messaging.get_rpc_server(self.transport,
                                                    self.target,
                                                    endpoints)
        self._ctrl = oslo_messaging.RPCClient(self.transport, self.ctrl_target)
        self._start()

    def cleanUp(self):
        self._stop()
        super(RpcServerFixture, self).cleanUp()

    def _start(self):
        self.thread = threading.Thread(target=self.server.start)
        self.thread.daemon = True
        self.thread.start()

    def _stop(self):
        self.server.stop()
        self._ctrl.cast({}, 'ping')
        self.server.wait()
        self.thread.join()

    def ping(self, ctxt):
        pass

    def sync(self, ctxt, item):
        self.syncq.put(item)


class RpcServerGroupFixture(fixtures.Fixture):
    def __init__(self, url, topic=None, names=None, exchange=None,
                 transport=None, use_fanout_ctrl=False):
        self.url = url
        # NOTE(sileht): topic and server_name must be unique
        # to be able to run all tests in parallel
        self.topic = topic or str(uuid.uuid4())
        self.names = names or ["server_%i_%s" % (i, uuid.uuid4())
                               for i in range(3)]
        self.exchange = exchange
        self.targets = [self._target(server=n) for n in self.names]
        self.transport = transport
        self.use_fanout_ctrl = use_fanout_ctrl

    def setUp(self):
        super(RpcServerGroupFixture, self).setUp()
        if not self.transport:
            self.transport = self.useFixture(TransportFixture(self.url))
        self.servers = [self.useFixture(self._server(t)) for t in self.targets]
        self.transport.wait()

    def _target(self, server=None, fanout=False):
        t = oslo_messaging.Target(exchange=self.exchange, topic=self.topic)
        t.server = server
        t.fanout = fanout
        return t

    def _server(self, target):
        ctrl = None
        if self.use_fanout_ctrl:
            ctrl = self._target(fanout=True)
        return RpcServerFixture(self.transport.transport, target,
                                ctrl_target=ctrl)

    def client(self, server=None, cast=False):
        if server:
            if server == 'all':
                target = self._target(fanout=True)
            elif server >= 0 and server < len(self.targets):
                target = self.targets[server]
            else:
                raise ValueError("Invalid value for server: %r" % server)
        else:
            target = self._target()
        return ClientStub(self.transport.transport, target, cast=cast,
                          timeout=5)

    def sync(self, server=None):
        if server:
            if server == 'all':
                c = self.client(server='all', cast=True)
                c.sync(item='x')
                for s in self.servers:
                    s.syncq.get(timeout=5)
            elif server >= 0 and server < len(self.targets):
                c = self.client(server=server, cast=True)
                c.sync(item='x')
                self.servers[server].syncq.get(timeout=5)
            else:
                raise ValueError("Invalid value for server: %r" % server)
        else:
            for i in range(len(self.servers)):
                self.client(i).ping()


class RpcCall(object):
    def __init__(self, client, method, context):
        self.client = client
        self.method = method
        self.context = context

    def __call__(self, **kwargs):
        self.context['time'] = time.ctime()
        self.context['cast'] = False
        result = self.client.call(self.context, self.method, **kwargs)
        return result


class RpcCast(RpcCall):
    def __call__(self, **kwargs):
        self.context['time'] = time.ctime()
        self.context['cast'] = True
        self.client.cast(self.context, self.method, **kwargs)


class ClientStub(object):
    def __init__(self, transport, target, cast=False, name=None, **kwargs):
        self.name = name or "functional-tests"
        self.cast = cast
        self.client = oslo_messaging.RPCClient(transport, target, **kwargs)

    def __getattr__(self, name):
        context = {"application": self.name}
        if self.cast:
            return RpcCast(self.client, name, context)
        else:
            return RpcCall(self.client, name, context)
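
# Illustrative note, not part of the original change: ClientStub is a small
# dynamic proxy.  Attribute access returns an RpcCall (or RpcCast when
# cast=True) bound to that attribute name, so the functional tests above can
# simply write, for example:
#
#     client = ClientStub(transport, target, cast=False, timeout=5)
#     client.append(text='open')   # -> RPCClient.call(ctxt, 'append', text='open')
#     client.add(increment=2)      # -> RPCClient.call(ctxt, 'add', increment=2)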


class InvalidDistribution(object):
    def __init__(self, original, received):
        self.original = original
        self.received = received
        self.missing = []
        self.extra = []
        self.wrong_order = []

    def describe(self):
        text = "Sent %s, got %s; " % (self.original, self.received)
        e1 = ["%r was missing" % m for m in self.missing]
        e2 = ["%r was not expected" % m for m in self.extra]
        e3 = ["%r expected before %r" % (m[0], m[1]) for m in self.wrong_order]
        return text + ", ".join(e1 + e2 + e3)

    def __len__(self):
        return len(self.extra) + len(self.missing) + len(self.wrong_order)

    def get_details(self):
        return {}


class IsValidDistributionOf(object):
    """Test whether a given list can be split into particular
    sub-lists. All items in the original list must be in exactly one
    sub-list, and must appear in that sub-list in the same order with
    respect to any other items as in the original list.
    """
    def __init__(self, original):
        self.original = original

    def __str__(self):
        return 'IsValidDistribution(%s)' % self.original

    def match(self, actual):
        errors = InvalidDistribution(self.original, actual)
        received = [[i for i in l] for l in actual]

        def _remove(obj, lists):
            for l in lists:
                if obj in l:
                    front = l[0]
                    l.remove(obj)
                    return front
            return None

        for item in self.original:
            o = _remove(item, received)
            if not o:
                errors.missing += item
            elif item != o:
                errors.wrong_order.append([item, o])
        for l in received:
            errors.extra += l
        return errors or None
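
# Illustrative note, not part of the original change: IsValidDistributionOf
# follows the testtools matcher protocol, where match() returns None on
# success and a mismatch object (here, InvalidDistribution) on failure, so it
# can be used directly with assertThat, for example:
#
#     self.assertThat([['a', 'c'], ['b']],
#                     IsValidDistributionOf(['a', 'b', 'c']))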


class SkipIfNoTransportURL(test_utils.BaseTestCase):
    def setUp(self):
        super(SkipIfNoTransportURL, self).setUp()
        self.url = os.environ.get('TRANSPORT_URL')
        if not self.url:
            self.skipTest("No transport url configured")


class NotificationFixture(fixtures.Fixture):
    def __init__(self, transport, topics):
        super(NotificationFixture, self).__init__()
        self.transport = transport
        self.topics = topics
        self.events = moves.queue.Queue()
        self.name = str(id(self))

    def setUp(self):
        super(NotificationFixture, self).setUp()
        targets = [oslo_messaging.Target(topic=t) for t in self.topics]
        # add a special topic for internal notifications
        targets.append(oslo_messaging.Target(topic=self.name))
        self.server = oslo_messaging.get_notification_listener(
            self.transport,
            targets,
            [self])
        self._ctrl = self.notifier('internal', topic=self.name)
        self._start()

    def cleanUp(self):
        self._stop()
        super(NotificationFixture, self).cleanUp()

    def _start(self):
        self.thread = threading.Thread(target=self.server.start)
        self.thread.daemon = True
        self.thread.start()

    def _stop(self):
        self.server.stop()
        self._ctrl.sample({}, 'shutdown', 'shutdown')
        self.server.wait()
        self.thread.join()

    def notifier(self, publisher, topic=None):
        return notifier.Notifier(self.transport,
                                 publisher,
                                 driver='messaging',
                                 topic=topic or self.topics[0])

    def debug(self, ctxt, publisher, event_type, payload, metadata):
        self.events.put(['debug', event_type, payload, publisher])

    def audit(self, ctxt, publisher, event_type, payload, metadata):
        self.events.put(['audit', event_type, payload, publisher])

    def info(self, ctxt, publisher, event_type, payload, metadata):
        self.events.put(['info', event_type, payload, publisher])

    def warn(self, ctxt, publisher, event_type, payload, metadata):
        self.events.put(['warn', event_type, payload, publisher])

    def error(self, ctxt, publisher, event_type, payload, metadata):
        self.events.put(['error', event_type, payload, publisher])

    def critical(self, ctxt, publisher, event_type, payload, metadata):
        self.events.put(['critical', event_type, payload, publisher])

    def sample(self, ctxt, publisher, event_type, payload, metadata):
        pass  # Just used for internal shutdown control

    def get_events(self, timeout=0.5):
        results = []
        try:
            while True:
                results.append(self.events.get(timeout=timeout))
        except moves.queue.Empty:
            pass
        return results
0
oslo_messaging/tests/notify/__init__.py
Normal file
149
oslo_messaging/tests/notify/test_dispatcher.py
Normal file
@ -0,0 +1,149 @@

# Copyright 2013 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import itertools

import mock
import testscenarios

from oslo.utils import timeutils
import oslo_messaging
from oslo_messaging.notify import dispatcher as notify_dispatcher
from oslo_messaging.tests import utils as test_utils

load_tests = testscenarios.load_tests_apply_scenarios


notification_msg = dict(
    publisher_id="publisher_id",
    event_type="compute.start",
    payload={"info": "fuu"},
    message_id="uuid",
    timestamp=str(timeutils.utcnow())
)


class TestDispatcher(test_utils.BaseTestCase):

    scenarios = [
        ('no_endpoints',
         dict(endpoints=[],
              endpoints_expect_calls=[],
              priority='info',
              ex=None,
              return_value=oslo_messaging.NotificationResult.HANDLED)),
        ('one_endpoints',
         dict(endpoints=[['warn']],
              endpoints_expect_calls=['warn'],
              priority='warn',
              ex=None,
              return_value=oslo_messaging.NotificationResult.HANDLED)),
        ('two_endpoints_only_one_match',
         dict(endpoints=[['warn'], ['info']],
              endpoints_expect_calls=[None, 'info'],
              priority='info',
              ex=None,
              return_value=oslo_messaging.NotificationResult.HANDLED)),
        ('two_endpoints_both_match',
         dict(endpoints=[['debug', 'info'], ['info', 'debug']],
              endpoints_expect_calls=['debug', 'debug'],
              priority='debug',
              ex=None,
              return_value=oslo_messaging.NotificationResult.HANDLED)),
        ('no_return_value',
         dict(endpoints=[['warn']],
              endpoints_expect_calls=['warn'],
              priority='warn',
              ex=None, return_value=None)),
        ('requeue',
         dict(endpoints=[['debug', 'warn']],
              endpoints_expect_calls=['debug'],
              priority='debug', msg=notification_msg,
              ex=None,
              return_value=oslo_messaging.NotificationResult.REQUEUE)),
        ('exception',
         dict(endpoints=[['debug', 'warn']],
              endpoints_expect_calls=['debug'],
              priority='debug', msg=notification_msg,
              ex=Exception,
              return_value=oslo_messaging.NotificationResult.HANDLED)),
    ]
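
    # Illustrative note, not part of the original change: via
    # load_tests_apply_scenarios above, testscenarios runs each test method
    # once per (name, attrs) tuple in `scenarios`, binding the attrs as
    # instance attributes, so test_dispatcher below sees self.endpoints,
    # self.priority, self.ex and self.return_value set per scenario.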

    def test_dispatcher(self):
        endpoints = []
        for endpoint_methods in self.endpoints:
            e = mock.Mock(spec=endpoint_methods)
            endpoints.append(e)
            for m in endpoint_methods:
                method = getattr(e, m)
                if self.ex:
                    method.side_effect = self.ex()
                else:
                    method.return_value = self.return_value

        msg = notification_msg.copy()
        msg['priority'] = self.priority

        targets = [oslo_messaging.Target(topic='notifications')]
        dispatcher = notify_dispatcher.NotificationDispatcher(
            targets, endpoints, None, allow_requeue=True, pool=None)

        # check it listens on the wanted topics
        self.assertEqual(sorted(set((targets[0], prio)
                                    for prio in itertools.chain.from_iterable(
                                        self.endpoints))),
                         sorted(dispatcher._targets_priorities))

        incoming = mock.Mock(ctxt={}, message=msg)
        with dispatcher(incoming) as callback:
            callback()

        # check endpoint callbacks are called or not
        for i, endpoint_methods in enumerate(self.endpoints):
            for m in endpoint_methods:
                if m == self.endpoints_expect_calls[i]:
                    method = getattr(endpoints[i], m)
                    method.assert_called_once_with(
                        {},
                        msg['publisher_id'],
                        msg['event_type'],
                        msg['payload'], {
                            'timestamp': mock.ANY,
                            'message_id': mock.ANY
                        })
                else:
                    self.assertEqual(0, endpoints[i].call_count)

        if self.ex:
            self.assertEqual(1, incoming.acknowledge.call_count)
            self.assertEqual(0, incoming.requeue.call_count)
        elif self.return_value == oslo_messaging.NotificationResult.HANDLED \
                or self.return_value is None:
            self.assertEqual(1, incoming.acknowledge.call_count)
            self.assertEqual(0, incoming.requeue.call_count)
        elif self.return_value == oslo_messaging.NotificationResult.REQUEUE:
            self.assertEqual(0, incoming.acknowledge.call_count)
            self.assertEqual(1, incoming.requeue.call_count)

    @mock.patch('oslo_messaging.notify.dispatcher.LOG')
    def test_dispatcher_unknown_prio(self, mylog):
        msg = notification_msg.copy()
        msg['priority'] = 'what???'
        dispatcher = notify_dispatcher.NotificationDispatcher(
            [mock.Mock()], [mock.Mock()], None, allow_requeue=True, pool=None)
        with dispatcher(mock.Mock(ctxt={}, message=msg)) as callback:
            callback()
        mylog.warning.assert_called_once_with('Unknown priority "%s"',
                                              'what???')
Some files were not shown because too many files have changed in this diff.