fix(wsgi): Clean up limit config options

This patch contains several misc. changes to queue, message, and
claim limits to reduce confusion and bring the implementation in
line with the v1 spec.

1. Removed a couple of WSGI driver config options that are
no longer needed now that we have redefined (and simplified) how
we constrain message and metadata size.

    metadata_max_length = 65536
    content_max_length = 262144

2. Renamed options to be more readable and consistent; the old names are
kept as deprecated aliases (see the sketch after this list).
3. Moved options to the [transport] section.
4. Made the maximum number of messages that can be claimed its own setting,
to reduce confusion.
5. Stopped enforcing an upper limit on the number of messages that can be
posted; this was never in the spec and appears to be gold-plating. Now the
only upper limit is max_message_size.
6. Removed the check on the size of a create claim request since (1) it is
not part of the API spec, and (2) sanity-checks like that are best done by
the web server, before a request even touches the app.
7. Migrated limits for storage driver interface params to static values,
since those defaults define the static contract between transport and
storage drivers.
8. Wrapped validation error messages in gettextutils._, and converted them
to use .format instead of %.
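
The renamed options remain backwards-compatible: each new option registers
its old name and group as an oslo.config deprecated alias. A minimal sketch
of the mechanism (it mirrors two of the options added in this patch, but
uses an illustrative standalone ConfigOpts instance rather than the global
config object):

    from oslo.config import cfg

    # New names live in the [transport] group; deprecated_name and
    # deprecated_group keep old [limits:transport] settings working.
    _TRANSPORT_LIMITS_OPTIONS = [
        cfg.IntOpt('max_queues_per_page', default=20,
                   deprecated_name='queue_paging_uplimit',
                   deprecated_group='limits:transport'),
        cfg.IntOpt('max_message_size', default=256 * 1024,
                   deprecated_name='message_size_uplimit',
                   deprecated_group='limits:transport'),
    ]

    conf = cfg.ConfigOpts()
    conf.register_opts(_TRANSPORT_LIMITS_OPTIONS, group='transport')
    conf([])  # no config files; defaults (or deprecated overrides) apply

    print(conf['transport'].max_message_size)  # 262144 unless overridden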

Change-Id: I1372e5002f030f5c8c47774ab00ca8ee7e12232d
Closes-Bug: #1270260
kgriffs 2014-01-17 16:50:58 -06:00 committed by Gerrit Code Review
parent 280da9d054
commit a8d21ee296
27 changed files with 311 additions and 268 deletions

View File

@ -20,7 +20,9 @@ log_file = /var/log/marconi/queues.log
# Set to True to activate endpoints to manage the shard registry
;admin_mode = False
# ================= Syslog Options ============================
# ======================================================================
# Syslog
# ======================================================================
# Send logs to syslog (/dev/log) instead of to file specified
# by `log_file`
@ -29,7 +31,10 @@ log_file = /var/log/marconi/queues.log
# Facility to use. If unset defaults to LOG_USER.
;syslog_log_facility = LOG_LOCAL0
# ================= Driver Options ============================
# ======================================================================
# Drivers
# ======================================================================
[drivers]
# Transport driver module (e.g., wsgi, zmq)
@ -38,27 +43,58 @@ transport = wsgi
# Storage driver module (e.g., mongodb, sqlite)
storage = mongodb
# TODO(kgriffs): Add example stages
# ======================================================================
# General storage options
# ======================================================================
[storage]
# Pipeline for operations on queue resources
;queue_pipeline =
# Pipeline for operations on message resources
;message_pipeline =
# Pipeline for operations on claim resources
;claim_pipeline =
# ======================================================================
# General transport options
# ======================================================================
[transport]
# Maximum number of queue records that may be requested per page,
# when listing queues.
;max_queues_per_page = 20
# Maximum number of messages per page when listing messages. Also
# determines the max number of messages that can be requested or
# deleted by ID.
;max_messages_per_page = 20
# Maximum number of messages that can be claimed at a time.
;max_messages_per_claim = 20
# Maximum lifetimes, in seconds; the minimum value for each is 60 seconds.
;max_message_ttl = 1209600
;max_claim_ttl = 43200
;max_claim_grace = 43200
# Maximum size, in bytes, allowed for queue metadata and bulk/single
# message post bodies. Includes whitespace and envelope fields, if any.
;max_queue_metadata = 65536
;max_message_size = 262144
# ======================================================================
# Driver-specific transport options
# ======================================================================
[drivers:transport:wsgi]
;bind = 0.0.0.0
;port = 8888
# Maximum Content-Length allowed for metadata updating and
# message posting.
;metadata_max_length = 65536
;content_max_length = 262144
;[drivers:transport:zmq]
;port = 9999
# ======================================================================
# Driver-specific storage options
# ======================================================================
[drivers:storage:mongodb]
uri = mongodb://db1.example.net,db2.example.net:2500/?replicaSet=test&ssl=true&w=majority
database = marconi
@ -74,7 +110,7 @@ database = marconi
# only used for retrying a message post.
;max_attempts = 1000
# Maximum sleep interval between retries (actual sleep time
# Maximum sleep interval between retries in seconds (actual sleep time
# increases linearly according to number of attempts performed).
;max_retry_sleep = 0.1
@ -83,28 +119,3 @@ database = marconi
# at the same instant.
;max_retry_jitter = 0.005
[limits:transport]
# The maximum number of queue records per page when listing queues
;queue_paging_uplimit = 20
# The maximum number of messages in a message posting, maximum
# number of messages per page when listing or claiming messages,
# and maximum number of messages involved in a bulk operation.
;message_paging_uplimit = 20
# Expiration limits; the minimal values are all 60 (seconds)
;message_ttl_max = 1209600
;claim_ttl_max = 43200
;claim_grace_max = 43200
# Maximum compact-JSON (without whitespace) size in bytes allowed
# for each metadata body and each message body
;metadata_size_uplimit = 65536
;message_size_uplimit = 262144
[limits:storage]
# The default number of queue records per page when listing queues
;default_queue_paging = 10
# The default number of messages per page when listing or claiming messages
;default_message_paging = 10

View File

@ -37,7 +37,7 @@ class ResponseSchema(api.Api):
"ttl": {
"type": "number",
"minimum": 1,
"maximum": self.limits.message_ttl_max
"maximum": self.limits.max_message_ttl
},
"age": {
"type": "number",
@ -50,6 +50,6 @@ class ResponseSchema(api.Api):
"required": ["href", "ttl", "age", "body"]
},
"minItems": 1,
"maxItems": self.limits.message_paging_uplimit
"maxItems": self.limits.max_messages_per_page
}
}

View File

@ -24,3 +24,9 @@ Claim = base.Claim
Message = base.Message
Queue = base.Queue
ShardsBase = base.ShardsBase
DEFAULT_QUEUES_PER_PAGE = base.DEFAULT_QUEUES_PER_PAGE
DEFAULT_MESSAGES_PER_PAGE = base.DEFAULT_MESSAGES_PER_PAGE
DEFAULT_SHARDS_PER_PAGE = base.DEFAULT_SHARDS_PER_PAGE
DEFAULT_MESSAGES_PER_CLAIM = base.DEFAULT_MESSAGES_PER_CLAIM

View File

@ -18,24 +18,11 @@
import abc
import six
from oslo.config import cfg
DEFAULT_QUEUES_PER_PAGE = 10
DEFAULT_MESSAGES_PER_PAGE = 10
DEFAULT_SHARDS_PER_PAGE = 10
from marconi.common import utils
_LIMITS_OPTIONS = [
cfg.IntOpt('default_queue_paging', default=10,
help='Default queue pagination size'),
cfg.IntOpt('default_message_paging', default=10,
help='Default message pagination size')
]
_LIMITS_GROUP = 'limits:storage'
def _config_options():
return utils.options_iter(_LIMITS_OPTIONS, _LIMITS_GROUP)
DEFAULT_MESSAGES_PER_CLAIM = 10
@six.add_metaclass(abc.ABCMeta)
@ -73,9 +60,6 @@ class DataDriverBase(DriverBase):
def __init__(self, conf, cache):
super(DataDriverBase, self).__init__(conf, cache)
self.conf.register_opts(_LIMITS_OPTIONS, group=_LIMITS_GROUP)
self.limits_conf = self.conf[_LIMITS_GROUP]
@abc.abstractmethod
def is_alive(self):
"""Check whether the storage is ready."""
@ -150,13 +134,12 @@ class Queue(ControllerBase):
@abc.abstractmethod
def list(self, project=None, marker=None,
limit=None, detailed=False):
limit=DEFAULT_QUEUES_PER_PAGE, detailed=False):
"""Base method for listing queues.
:param project: Project id
:param marker: The last queue name
:param limit: (Default 10, configurable) Max number
queues to return.
:param limit: (Default 10) Max number of queues to return
:param detailed: Whether metadata is included
:returns: An iterator giving a sequence of queues
@ -236,7 +219,8 @@ class Message(ControllerBase):
@abc.abstractmethod
def list(self, queue, project=None, marker=None,
limit=None, echo=False, client_uuid=None,
limit=DEFAULT_MESSAGES_PER_PAGE,
echo=False, client_uuid=None,
include_claimed=False):
"""Base method for listing messages.
@ -244,8 +228,7 @@ class Message(ControllerBase):
message from.
:param project: Project id
:param marker: Tail identifier
:param limit: (Default 10, configurable) Max number
messages to return.
:param limit: (Default 10) Max number of messages to return.
:type limit: Maybe int
:param echo: (Default False) Boolean expressing whether
or not this client should receive its own messages.
@ -295,7 +278,8 @@ class Message(ControllerBase):
:param project: Project id
:param message_ids: A sequence of message IDs.
:returns: An iterable, yielding dicts containing message details
:returns: An iterable, yielding dicts containing
message details
"""
raise NotImplementedError
@ -364,7 +348,8 @@ class Claim(ControllerBase):
raise NotImplementedError
@abc.abstractmethod
def create(self, queue, metadata, project=None, limit=None):
def create(self, queue, metadata, project=None,
limit=DEFAULT_MESSAGES_PER_CLAIM):
"""Base method for creating a claim.
:param queue: Name of the queue this
@ -372,7 +357,7 @@ class Claim(ControllerBase):
:param metadata: Claim's parameters
to be stored.
:param project: Project id
:param limit: (Default 10, configurable) Max number
:param limit: (Default 10) Max number
of messages to claim.
:returns: (Claim ID, claimed messages)
@ -409,12 +394,13 @@ class ShardsBase(ControllerBase):
"""A controller for managing shards."""
@abc.abstractmethod
def list(self, marker=None, limit=10, detailed=False):
def list(self, marker=None, limit=DEFAULT_SHARDS_PER_PAGE,
detailed=False):
"""Lists all registered shards.
:param marker: used to determine which shard to start with
:type marker: six.text_type
:param limit: how many results to return
:param limit: (Default 10) Max number of results to return
:type limit: int
:param detailed: whether to include options
:type detailed: bool
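
Since the interface defaults are now static, any code that needs them can
read the module-level constants directly; a small sketch (assuming marconi
is importable, with the values defined above):

    from marconi.queues import storage

    # Static defaults replace the old [limits:storage] options and define
    # the contract between transport and storage drivers.
    assert storage.DEFAULT_QUEUES_PER_PAGE == 10
    assert storage.DEFAULT_MESSAGES_PER_PAGE == 10
    assert storage.DEFAULT_MESSAGES_PER_CLAIM == 10

    # Concrete controllers pick them up as keyword defaults, e.g.:
    #
    #     def list(self, project=None, marker=None,
    #              limit=storage.DEFAULT_QUEUES_PER_PAGE, detailed=False):
    #         ...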

View File

@ -98,7 +98,8 @@ class ClaimController(storage.Claim):
return (claim_meta, msgs)
@utils.raises_conn_error
def create(self, queue, metadata, project=None, limit=None):
def create(self, queue, metadata, project=None,
limit=storage.DEFAULT_MESSAGES_PER_CLAIM):
"""Creates a claim.
This implementation was done in a best-effort fashion.
@ -118,9 +119,6 @@ class ClaimController(storage.Claim):
"""
msg_ctrl = self.driver.message_controller
if limit is None:
limit = self.driver.limits_conf.default_message_paging
ttl = metadata['ttl']
grace = metadata['grace']
oid = objectid.ObjectId()

View File

@ -394,12 +394,10 @@ class MessageController(storage.Message):
# Public interface
#-----------------------------------------------------------------------
def list(self, queue_name, project=None, marker=None, limit=None,
def list(self, queue_name, project=None, marker=None,
limit=storage.DEFAULT_MESSAGES_PER_PAGE,
echo=False, client_uuid=None, include_claimed=False):
if limit is None:
limit = self.driver.limits_conf.default_message_paging
if marker is not None:
try:
marker = int(marker)

View File

@ -169,10 +169,7 @@ class QueueController(storage.Queue):
#-----------------------------------------------------------------------
def list(self, project=None, marker=None,
limit=None, detailed=False):
if limit is None:
limit = self.driver.limits_conf.default_queue_paging
limit=storage.DEFAULT_QUEUES_PER_PAGE, detailed=False):
query = utils.scoped_query(marker, project)

View File

@ -140,7 +140,7 @@ class QueueController(RoutingController):
self._lookup = self._shard_catalog.lookup
def list(self, project=None, marker=None,
limit=None, detailed=False):
limit=storage.DEFAULT_QUEUES_PER_PAGE, detailed=False):
def all_pages():
for shard in self._shard_catalog._shards_ctrl.list(limit=0):
@ -157,9 +157,6 @@ class QueueController(RoutingController):
for page in all_pages()
])
if limit is None:
limit = self._shard_catalog._limits_conf.default_queue_paging
marker_name = {}
# limit the iterator and strip out the comparison wrapper
@ -342,10 +339,6 @@ class Catalog(object):
self._conf.register_opts(_CATALOG_OPTIONS, group=_CATALOG_GROUP)
self._catalog_conf = self._conf[_CATALOG_GROUP]
self._conf.register_opts(storage.base._LIMITS_OPTIONS,
group=storage.base._LIMITS_GROUP)
self._limits_conf = self._conf[storage.base._LIMITS_GROUP]
self._shards_ctrl = control.shards_controller
self._catalogue_ctrl = control.catalogue_controller

View File

@ -13,12 +13,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from marconi.queues.storage import base
from marconi.queues import storage
from marconi.queues.storage import errors
from marconi.queues.storage.sqlite import utils
class ClaimController(base.Claim):
class ClaimController(storage.Claim):
def get(self, queue, claim_id, project):
if project is None:
@ -50,14 +50,12 @@ class ClaimController(base.Claim):
except utils.NoResult:
raise errors.ClaimDoesNotExist(claim_id, queue, project)
def create(self, queue, metadata, project, limit=None):
def create(self, queue, metadata, project,
limit=storage.DEFAULT_MESSAGES_PER_CLAIM):
if project is None:
project = ''
if limit is None:
limit = self.driver.limits_conf.default_message_paging
with self.driver('immediate'):
try:
qid = utils.get_qid(self.driver, queue, project)

View File

@ -14,12 +14,12 @@
# limitations under the License.
from marconi.openstack.common import timeutils
from marconi.queues.storage import base
from marconi.queues import storage
from marconi.queues.storage import errors
from marconi.queues.storage.sqlite import utils
class MessageController(base.Message):
class MessageController(storage.Message):
def get(self, queue, message_id, project):
if project is None:
@ -114,12 +114,10 @@ class MessageController(base.Message):
'body': content,
}
def list(self, queue, project, marker=None, limit=None,
def list(self, queue, project, marker=None,
limit=storage.DEFAULT_MESSAGES_PER_PAGE,
echo=False, client_uuid=None, include_claimed=False):
if limit is None:
limit = self.driver.limits_conf.default_message_paging
if project is None:
project = ''

View File

@ -14,22 +14,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from marconi.queues.storage import base
from marconi.queues import storage
from marconi.queues.storage import errors
from marconi.queues.storage.sqlite import utils
class QueueController(base.Queue):
class QueueController(storage.Queue):
def list(self, project, marker=None,
limit=None, detailed=False):
limit=storage.DEFAULT_QUEUES_PER_PAGE, detailed=False):
if project is None:
project = ''
if limit is None:
limit = self.driver.limits_conf.default_queue_paging
sql = (('''
select name from Queues''' if not detailed
else '''

View File

@ -20,18 +20,40 @@ from oslo.config import cfg
from marconi.common import utils
from marconi.openstack.common.gettextutils import _
MIN_MESSAGE_TTL = 60
MIN_CLAIM_TTL = 60
MIN_CLAIM_GRACE = 60
_TRANSPORT_LIMITS_OPTIONS = [
cfg.IntOpt('queue_paging_uplimit', default=20),
cfg.IntOpt('metadata_size_uplimit', default=64 * 1024),
cfg.IntOpt('message_paging_uplimit', default=20),
cfg.IntOpt('message_size_uplimit', default=256 * 1024),
cfg.IntOpt('message_ttl_max', default=1209600),
cfg.IntOpt('claim_ttl_max', default=43200),
cfg.IntOpt('claim_grace_max', default=43200),
cfg.IntOpt('max_queues_per_page', default=20,
deprecated_name='queue_paging_uplimit',
deprecated_group='limits:transport'),
cfg.IntOpt('max_messages_per_page', default=20,
deprecated_name='message_paging_uplimit',
deprecated_group='limits:transport'),
cfg.IntOpt('max_messages_per_claim', default=20),
cfg.IntOpt('max_queue_metadata', default=64 * 1024,
deprecated_name='metadata_size_uplimit',
deprecated_group='limits:transport'),
cfg.IntOpt('max_message_size', default=256 * 1024,
deprecated_name='message_size_uplimit',
deprecated_group='limits:transport'),
cfg.IntOpt('max_message_ttl', default=1209600,
deprecated_name='message_ttl_max',
deprecated_group='limits:transport'),
cfg.IntOpt('max_claim_ttl', default=43200,
deprecated_name='claim_ttl_max',
deprecated_group='limits:transport'),
cfg.IntOpt('max_claim_grace', default=43200,
deprecated_name='claim_grace_max',
deprecated_group='limits:transport'),
]
_TRANSPORT_LIMITS_GROUP = 'limits:transport'
_TRANSPORT_LIMITS_GROUP = 'transport'
# NOTE(kgriffs): Don't use \w because it isn't guaranteed to match
# only ASCII characters.
@ -48,6 +70,10 @@ def _config_options():
class ValidationFailed(ValueError):
"""User input did not follow API restrictions."""
def __init__(self, msg, *args, **kwargs):
msg = msg.format(*args, **kwargs)
super(ValidationFailed, self).__init__(msg)
class Validator(object):
def __init__(self, conf):
@ -68,19 +94,17 @@ class Validator(object):
"""
if project is not None and len(project) > PROJECT_ID_MAX_LEN:
raise ValidationFailed(
'Project ids may not be more than %d characters long.'
% PROJECT_ID_MAX_LEN)
msg = _(u'Project ids may not be more than {0} characters long.')
raise ValidationFailed(msg, PROJECT_ID_MAX_LEN)
if len(queue) > QUEUE_NAME_MAX_LEN:
raise ValidationFailed(
'Queue names may not be more than %d characters long.'
% QUEUE_NAME_MAX_LEN)
msg = _(u'Queue names may not be more than {0} characters long.')
raise ValidationFailed(msg, QUEUE_NAME_MAX_LEN)
if not QUEUE_NAME_REGEX.match(queue):
raise ValidationFailed(
'Queue names may only contain ASCII letters, digits, '
'underscores, and dashes.')
_(u'Queue names may only contain ASCII letters, digits, '
'underscores, and dashes.'))
def queue_listing(self, limit=None, **kwargs):
"""Restrictions involving a list of queues.
@ -90,11 +114,10 @@ class Validator(object):
:raises: ValidationFailed if the limit is exceeded
"""
uplimit = self._limits_conf.queue_paging_uplimit
uplimit = self._limits_conf.max_queues_per_page
if limit is not None and not (0 < limit <= uplimit):
raise ValidationFailed(
'Limit must be at least 1 and no greater than %d.' %
self._limits_conf.queue_paging_uplimit)
msg = _(u'Limit must be at least 1 and no greater than {0}.')
raise ValidationFailed(msg, self._limits_conf.max_queues_per_page)
def queue_metadata_length(self, content_length):
"""Restrictions on queue's length.
@ -102,20 +125,21 @@ class Validator(object):
:param content_length: Queue request's length.
:raises: ValidationFailed if the metadata is oversize.
"""
if content_length > self._limits_conf.metadata_size_uplimit:
error = _(u'Queue request size is too large. Max size %s')
raise ValidationFailed(error %
self._limits_conf.metadata_size_uplimit)
if content_length > self._limits_conf.max_queue_metadata:
msg = _(u'Queue metadata is too large. Max size: {0}')
raise ValidationFailed(msg, self._limits_conf.max_queue_metadata)
def message_posting(self, messages):
"""Restrictions on a list of messages.
:param messages: A list of messages
:raises: ValidationFailed if any message has an out-of-range
TTL, or an oversize message body.
TTL.
"""
self.message_listing(limit=len(messages))
if not messages:
raise ValidationFailed(_(u'No messages to enqueue.'))
for msg in messages:
self.message_content(msg)
@ -126,19 +150,22 @@ class Validator(object):
:param content_length: Queue request's length.
:raises: ValidationFailed if the metadata is oversize.
"""
if content_length > self._limits_conf.message_size_uplimit:
error = _(u'Message collection size is too large. Max size %s')
raise ValidationFailed(error %
self._limits_conf.message_size_uplimit)
if content_length > self._limits_conf.max_message_size:
raise ValidationFailed(
_(u'Message collection size is too large. Max size {0}'),
self._limits_conf.max_message_size)
def message_content(self, message):
"""Restrictions on each message."""
if not (60 <= message['ttl'] <= self._limits_conf.message_ttl_max):
ttl = message['ttl']
if not (MIN_MESSAGE_TTL <= ttl <= self._limits_conf.max_message_ttl):
msg = _(u'The TTL for a message may not exceed {0} seconds, and '
'must be at least {1} seconds long.')
raise ValidationFailed(
('The TTL for a message may not exceed %d seconds, and '
'must be at least 60 seconds long.') %
self._limits_conf.message_ttl_max)
msg, self._limits_conf.max_message_ttl, MIN_MESSAGE_TTL)
def message_listing(self, limit=None, **kwargs):
"""Restrictions involving a list of messages.
@ -148,40 +175,54 @@ class Validator(object):
:raises: ValidationFailed if the limit is exceeded
"""
uplimit = self._limits_conf.message_paging_uplimit
uplimit = self._limits_conf.max_messages_per_page
if limit is not None and not (0 < limit <= uplimit):
raise ValidationFailed(
'Limit must be at least 1 and may not be greater than %d. ' %
self._limits_conf.message_paging_uplimit)
msg = _(u'Limit must be at least 1 and may not '
'be greater than {0}.')
def claim_creation(self, metadata, **kwargs):
raise ValidationFailed(
msg, self._limits_conf.max_messages_per_page)
def claim_creation(self, metadata, limit=None):
"""Restrictions on the claim parameters upon creation.
:param metadata: The claim metadata
:param kwargs: Other arguments passed to storage API
:param limit: The number of messages to claim
:raises: ValidationFailed if either TTL or grace is out of range,
or the expected number of messages exceeds the limit.
"""
self.message_listing(**kwargs)
self.claim_updating(metadata)
if not (60 <= metadata['grace'] <= self._limits_conf.claim_grace_max):
uplimit = self._limits_conf.max_messages_per_claim
if limit is not None and not (0 < limit <= uplimit):
msg = _(u'Limit must be at least 1 and may not '
'be greater than {0}.')
raise ValidationFailed(
('Grace must be at least 60 seconds and cannot '
'exceed %d.') %
self._limits_conf.claim_grace_max)
msg, self._limits_conf.max_messages_per_claim)
grace = metadata['grace']
if not (MIN_CLAIM_GRACE <= grace <= self._limits_conf.max_claim_grace):
msg = _(u'The grace for a claim may not exceed {0} seconds, and '
'must be at least {1} seconds long.')
raise ValidationFailed(
msg, self._limits_conf.max_claim_grace, MIN_CLAIM_GRACE)
def claim_updating(self, metadata):
"""Restrictions on the claim TTL.
:param metadata: The claim metadata
:param kwargs: Ignored arguments passed to storage API
:raises: ValidationFailed if the TTL is out of range
"""
if not (60 <= metadata['ttl'] <= self._limits_conf.claim_ttl_max):
ttl = metadata['ttl']
if not (MIN_CLAIM_TTL <= ttl <= self._limits_conf.max_claim_ttl):
msg = _(u'The TTL for a claim may not exceed {0} seconds, and '
'must be at least {1} seconds long.')
raise ValidationFailed(
('The TTL for a claim may not exceed %d seconds, and must be '
'at least 60 seconds long.') %
self._limits_conf.claim_ttl_max)
msg, self._limits_conf.max_claim_ttl, MIN_CLAIM_TTL)
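
For reference, the translate-then-format error pattern introduced above can
be exercised on its own; a standalone sketch (the class mirrors the one in
validation.py, and the sample limit of 20 is just the default value):

    from marconi.openstack.common.gettextutils import _

    class ValidationFailed(ValueError):
        """User input did not follow API restrictions."""

        def __init__(self, msg, *args, **kwargs):
            # Interpolate only when the error is raised, after the
            # (possibly translated) template has been selected.
            msg = msg.format(*args, **kwargs)
            super(ValidationFailed, self).__init__(msg)

    # Callers pass the template and values separately instead of
    # pre-formatting with the % operator.
    try:
        msg = _(u'Limit must be at least 1 and no greater than {0}.')
        raise ValidationFailed(msg, 20)
    except ValidationFailed as ex:
        print(ex)  # Limit must be at least 1 and no greater than 20.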

View File

@ -32,11 +32,10 @@ CLAIM_PATCH_SPEC = (('ttl', int),)
class Resource(object):
__slots__ = ('claim_controller', '_metadata_max_length', '_validate')
__slots__ = ('claim_controller', '_validate')
def __init__(self, wsgi_conf, validate, claim_controller):
self.claim_controller = claim_controller
self._metadata_max_length = wsgi_conf.metadata_max_length
self._validate = validate
@ -51,11 +50,6 @@ class CollectionResource(Resource):
limit = req.get_param_as_int('limit')
claim_options = {} if limit is None else {'limit': limit}
# Place JSON size restriction before parsing
if req.content_length > self._metadata_max_length:
description = _(u'Claim metadata size is too large.')
raise wsgi_errors.HTTPBadRequestBody(description)
# Read claim metadata (e.g., TTL) and raise appropriate
# HTTP errors as needed.
metadata, = wsgi_utils.filter_stream(req.stream, req.content_length,
@ -63,7 +57,7 @@ class CollectionResource(Resource):
# Claim some messages
try:
self._validate.claim_creation(metadata, **claim_options)
self._validate.claim_creation(metadata, limit=limit)
cid, msgs = self.claim_controller.create(
queue_name,
metadata=metadata,
@ -101,11 +95,10 @@ class CollectionResource(Resource):
class ItemResource(Resource):
__slots__ = ('claim_controller', '_metadata_max_length', '_validate')
__slots__ = ('claim_controller', '_validate')
def __init__(self, wsgi_conf, validate, claim_controller):
self.claim_controller = claim_controller
self._metadata_max_length = wsgi_conf.metadata_max_length
self._validate = validate
def on_get(self, req, resp, project_id, queue_name, claim_id):
@ -153,11 +146,6 @@ class ItemResource(Resource):
'project_id': project_id,
'claim_id': claim_id})
# Place JSON size restriction before parsing
if req.content_length > self._metadata_max_length:
description = _(u'Claim metadata size is too large.')
raise wsgi_errors.HTTPBadRequestBody(description)
# Read claim metadata (e.g., TTL) and raise appropriate
# HTTP errors as needed.
metadata, = wsgi_utils.filter_stream(req.stream, req.content_length,

View File

@ -38,9 +38,6 @@ _WSGI_OPTIONS = [
cfg.IntOpt('port', default=8888,
help='Port on which the self-hosting server will listen'),
cfg.IntOpt('content_max_length', default=256 * 1024),
cfg.IntOpt('metadata_max_length', default=64 * 1024)
]
_WSGI_GROUP = 'drivers:transport:wsgi'

View File

@ -133,7 +133,7 @@ class FunctionalTestBase(testing.TestBase):
:param result_json: json response returned for Queue Stats.
:param claimed: expected number of claimed messages.
"""
total = self.limits.message_paging_uplimit
total = self.limits.max_messages_per_claim
free = total - claimed
self.assertEqual(result_json['messages']['claimed'], claimed)
@ -163,7 +163,7 @@ class FunctionalTestBase(testing.TestBase):
# Verify that age has valid values
age = message['age']
self.assertTrue(0 <= age <= self.limits.message_ttl_max,
self.assertTrue(0 <= age <= self.limits.max_message_ttl,
msg='Invalid Age {0}'.format(age))
# Verify that GET on href returns 200

View File

@ -30,27 +30,25 @@ storage = sqlite
bind = 127.0.0.1
port = 8888
# Maximum Content-Length allowed for metadata updating and
# message posting.
;metadata_max_length = 65536
;content_max_length = 262144
;[drivers:transport:zmq]
;port = 9999
[limits:transport]
# The maximum number of queue records per page when listing queues
;queue_paging_uplimit = 20
# The maximum number of messages in a message posting, maximum
# number of messages per page when listing or claiming messages,
# and maximum number of messages involved in a bulk operation.
;message_paging_uplimit = 20
# Expiration limits; the minimal values are all 60 (seconds)
;message_ttl_max = 1209600
;claim_ttl_max = 43200
;claim_grace_max = 43200
;max_queues_per_page = 20
# Maximum compact-JSON (without whitespace) size in bytes allowed
# for each metadata body and each message body
;metadata_size_uplimit = 65536
;message_size_uplimit = 262144
# Maximum number of messages per page when listing messages.
;max_messages_per_page = 20
# Maximum number of messages that can be claimed at a time.
;max_messages_per_claim = 20
# Expiration limits; the minimum values are all 60 seconds
;max_message_ttl = 1209600
;max_claim_ttl = 43200
;max_claim_grace = 43200
# Maximum size in bytes allowed for queue metadata and bulk/single
# message post bodies (including whitespace and envelope fields).
;max_queue_metadata = 65536
;max_message_size = 262144

View File

@ -1,7 +1,3 @@
[drivers]
transport = wsgi
storage = sqlite
[limits:storage]
default_queue_paging = 1
default_message_paging = 2

View File

@ -2,6 +2,9 @@
transport = wsgi
storage = sqlite
# Test support for deprecated options
[limits:transport]
metadata_size_uplimit = 64
message_size_uplimit = 256
[transport]
max_message_size = 256

View File

@ -43,9 +43,9 @@ class TestClaims(base.FunctionalTestBase):
#Post Messages
url = self.queue_url + '/messages'
doc = helpers.create_message_body(messagecount=
self.limits.message_paging_uplimit)
for i in range(25):
doc = helpers.create_message_body(messagecount=50)
for i in range(10):
self.client.post(url, data=doc)
@ddt.data({}, dict(limit=2))
@ -84,9 +84,9 @@ class TestClaims(base.FunctionalTestBase):
def test_claim_more_than_allowed(self):
"""Claim more than max allowed per request.
Marconi allows a maximum of 20 messages per claim.
Marconi allows a maximum of 20 messages per claim by default.
"""
params = {"limit": self.limits.message_paging_uplimit + 1}
params = {"limit": self.limits.max_messages_per_claim + 1}
doc = {"ttl": 300, "grace": 100}
result = self.client.post(params=params, data=doc)

View File

@ -45,6 +45,22 @@ class TestMessages(base.FunctionalTestBase):
self.response = response.ResponseSchema(self.limits)
def tearDown(self):
self.client.delete(self.queue_url)
super(TestMessages, self).tearDown()
def _post_large_bulk_insert(self, offset):
"""Insert just under than max allowed messages."""
doc = '[{{"body": "{0}", "ttl": 300}}, {{"body": "{1}", "ttl": 120}}]'
overhead = len(doc.format('', ''))
half_size = (self.limits.max_message_size - overhead) / 2
doc = doc.format(helpers.generate_random_string(half_size),
helpers.generate_random_string(half_size + offset))
return self.client.post(data=doc)
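# NOTE: worked example, assuming the default max_message_size of 262144
# bytes. The two-message template above contributes 52 bytes of JSON
# envelope, so half_size = (262144 - 52) / 2 = 131046 and the posted
# document is 262144 + offset bytes long. That is why offsets <= 0 are
# accepted (201) and offsets >= 1 are rejected (400) in the tests below.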
def test_message_single_insert(self):
"""Insert Single Message into the Queue.
@ -93,7 +109,7 @@ class TestMessages(base.FunctionalTestBase):
def test_message_bulk_insert(self):
"""Bulk Insert Messages into the Queue."""
message_count = self.limits.message_paging_uplimit
message_count = self.limits.max_messages_per_page
doc = helpers.create_message_body(messagecount=message_count)
result = self.client.post(data=doc)
@ -133,7 +149,7 @@ class TestMessages(base.FunctionalTestBase):
# Test Setup
doc = helpers.create_message_body(messagecount=
self.limits.message_paging_uplimit)
self.limits.max_messages_per_page)
result = self.client.post(data=doc)
self.assertEqual(result.status_code, 201)
@ -235,17 +251,36 @@ class TestMessages(base.FunctionalTestBase):
test_message_partial_get.tags = ['negative']
def test_message_bulk_insert_60(self):
"""Insert more than max allowed messages.
@ddt.data(-10, -1, 0)
def test_message_bulk_insert_large_bodies(self, offset):
"""Insert just under than max allowed messages."""
result = self._post_large_bulk_insert(offset)
self.assertEqual(result.status_code, 201)
Marconi allows a maximum of 50 message per POST.
"""
doc = helpers.create_message_body(messagecount=60)
test_message_bulk_insert_large_bodies.tags = ['positive']
@ddt.data(1, 10)
def test_message_bulk_insert_large_bodies_negative(self, offset):
"""Insert bulk messages whose total size is just over the max."""
result = self._post_large_bulk_insert(offset)
self.assertEqual(result.status_code, 400)
test_message_bulk_insert_large_bodies_negative.tags = ['negative']
def test_message_bulk_insert_oversized(self):
"""Insert more than max allowed size."""
doc = '[{{"body": "{0}", "ttl": 300}}, {{"body": "{1}", "ttl": 120}}]'
overhead = len(doc.format('', ''))
half_size = (self.limits.max_message_size - overhead) / 2
doc = doc.format(helpers.generate_random_string(half_size),
helpers.generate_random_string(half_size + 1))
result = self.client.post(data=doc)
self.assertEqual(result.status_code, 400)
test_message_bulk_insert_60.tags = ['negative']
test_message_bulk_insert_oversized.tags = ['negative']
@ddt.data(10000000000000000000, -100, 0, 30, -10000000000000000000)
def test_message_get_invalid_limit(self, limit):
@ -267,7 +302,7 @@ class TestMessages(base.FunctionalTestBase):
"""
url = self.message_url + '?ids=' \
+ ','.join(str(i) for i in
range(self.limits.message_paging_uplimit + 1))
range(self.limits.max_messages_per_page + 1))
result = self.client.delete(url)
self.assertEqual(result.status_code, 400)
@ -282,7 +317,7 @@ class TestMessages(base.FunctionalTestBase):
"""
url = self.message_url + '?ids=' \
+ ','.join(str(i) for i in
range(self.limits.message_paging_uplimit + 1))
range(self.limits.max_messages_per_page + 1))
result = self.client.get(url)
self.assertEqual(result.status_code, 400)
@ -336,7 +371,3 @@ class TestMessages(base.FunctionalTestBase):
self.assertEqual(result.status_code, 204)
test_delete_non_existing_message.tags = ['negative']
def tearDown(self):
super(TestMessages, self).tearDown()
self.client.delete(self.queue_url)

View File

@ -383,8 +383,9 @@ class TestQueueMisc(base.FunctionalTestBase):
self.assertEqual(result.status_code, 201)
# Post Messages to the test queue
doc = helpers.create_message_body(messagecount=
self.limits.message_paging_uplimit)
doc = helpers.create_message_body(
messagecount=self.limits.max_messages_per_claim)
message_url = self.queue_url + '/messages'
result = self.client.post(message_url, data=doc)
self.assertEqual(result.status_code, 201)

View File

@ -20,6 +20,8 @@ from marconi.queues import bootstrap
from marconi.queues.transport.wsgi import driver
from marconi import tests as testing
from marconi.queues.transport import validation
class TestBase(testing.TestBase):
@ -31,6 +33,10 @@ class TestBase(testing.TestBase):
if not self.config_file:
self.skipTest("No config specified")
self.conf.register_opts(validation._TRANSPORT_LIMITS_OPTIONS,
group=validation._TRANSPORT_LIMITS_GROUP)
self.transport_cfg = self.conf[validation._TRANSPORT_LIMITS_GROUP]
self.conf.register_opts(driver._WSGI_OPTIONS,
group=driver._WSGI_GROUP)
self.wsgi_cfg = self.conf[driver._WSGI_GROUP]

View File

@ -92,20 +92,6 @@ class ClaimsBaseTest(base.TestBase):
self.simulate_post(self.claims_path, self.project_id, body=doc)
return self.srmock.headers_dict['Location']
def test_too_much_metadata(self):
doc = '{"ttl": 100, "grace": 60}'
long_doc = doc + (' ' *
(self.wsgi_cfg.metadata_max_length - len(doc) + 1))
self.simulate_post(self.claims_path, self.project_id, body=long_doc)
self.assertEqual(self.srmock.status, falcon.HTTP_400)
self.simulate_post(self.claims_path, self.project_id, body=doc)
href = self.srmock.headers_dict['Location']
self.simulate_patch(href, self.project_id, body=long_doc)
self.assertEqual(self.srmock.status, falcon.HTTP_400)
def test_lifecycle(self):
doc = '{"ttl": 100, "grace": 60}'

View File

@ -128,10 +128,6 @@ class MessagesBaseTest(base.TestBase):
self._post_messages(self.messages_path)
msg_id = self._get_msg_id(self.srmock.headers_dict)
# Posting restriction
self._post_messages(self.messages_path, repeat=23)
self.assertEqual(self.srmock.status, falcon.HTTP_400)
# Bulk GET restriction
query_string = 'ids=' + ','.join([msg_id] * 21)
self.simulate_get(self.messages_path, self.project_id,
@ -239,8 +235,9 @@ class MessagesBaseTest(base.TestBase):
def test_exceeded_message_posting(self):
# Total (raw request) size
doc = json.dumps([{'body': "some body", 'ttl': 100}] * 20, indent=4)
long_doc = doc + (' ' *
(self.wsgi_cfg.content_max_length - len(doc) + 1))
max_len = self.transport_cfg.max_message_size
long_doc = doc + (' ' * (max_len - len(doc) + 1))
self.simulate_post(self.queue_path + '/messages',
body=long_doc,

View File

@ -161,7 +161,10 @@ class QueueLifecycleBaseTest(base.TestBase):
self.simulate_put('/v1/queues/fizbat', '7e55e1a7e')
self.assertEqual(self.srmock.status, falcon.HTTP_201)
doc = '{{"messages": {{"ttl": 600}}, "padding": "{pad}"}}'
padding_len = self.wsgi_cfg.metadata_max_length - (len(doc) - 10) + 1
max_size = self.transport_cfg.max_queue_metadata
padding_len = max_size - (len(doc) - 10) + 1
doc = doc.format(pad='x' * padding_len)
self.simulate_put('/v1/queues/fizbat/metadata', '7e55e1a7e', body=doc)
@ -171,7 +174,10 @@ class QueueLifecycleBaseTest(base.TestBase):
self.simulate_put('/v1/queues/fizbat', '7e55e1a7e')
self.assertEqual(self.srmock.status, falcon.HTTP_201)
doc = '{{"messages": {{"ttl": 600}}, "padding": "{pad}"}}'
padding_len = self.wsgi_cfg.metadata_max_length * 100
max_size = self.transport_cfg.max_queue_metadata
padding_len = max_size * 100
doc = doc.format(pad='x' * padding_len)
self.simulate_put('/v1/queues/fizbat/metadata', '7e55e1a7e', body=doc)
@ -183,7 +189,10 @@ class QueueLifecycleBaseTest(base.TestBase):
# Set
doc = '{{"messages": {{"ttl": 600}}, "padding": "{pad}"}}'
padding_len = self.wsgi_cfg.metadata_max_length - (len(doc) - 2)
max_size = self.transport_cfg.max_queue_metadata
padding_len = max_size - (len(doc) - 2)
doc = doc.format(pad='x' * padding_len)
self.simulate_put('/v1/queues/fizbat/metadata', '480924', body=doc)
self.assertEqual(self.srmock.status, falcon.HTTP_204)

View File

@ -50,14 +50,14 @@ class ValidationTest(base.TestBase):
self.assertEqual(self.srmock.status, falcon.HTTP_204)
# Too long
metadata_size_uplimit = 64
max_queue_metadata = 64
doc_tmpl = '{{"Dragon Torc":"{0}"}}'
doc_tmpl_ws = '{{ "Dragon Torc" : "{0}" }}' # with whitespace
envelop_length = len(doc_tmpl.format(''))
envelope_length = len(doc_tmpl.format(''))
for tmpl in doc_tmpl, doc_tmpl_ws:
gen = '0' * (metadata_size_uplimit - envelop_length + 1)
gen = '0' * (max_queue_metadata - envelope_length + 1)
doc = tmpl.format(gen)
self.simulate_put(self.queue_path + '/metadata',
self.project_id,
@ -75,13 +75,13 @@ class ValidationTest(base.TestBase):
self.assertEqual(self.srmock.status, falcon.HTTP_201)
# Both messages' size are too long
message_size_uplimit = 256
max_message_size = 256
obj = {'a': 0, 'b': ''}
envelop_length = len(json.dumps(obj, separators=(',', ':')))
obj['b'] = 'x' * (message_size_uplimit - envelop_length + 1)
envelope_length = len(json.dumps(obj, separators=(',', ':')))
obj['b'] = 'x' * (max_message_size - envelope_length + 1)
for long_body in ('a' * (message_size_uplimit - 2 + 1), obj):
for long_body in ('a' * (max_message_size - 2 + 1), obj):
doc = json.dumps([{'body': long_body, 'ttl': 100}])
self.simulate_post(self.queue_path + '/messages',
self.project_id,

View File

@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import json
import uuid
@ -20,6 +21,8 @@ import falcon
from . import base # noqa
from marconi.queues import storage
class DefaultLimitsTest(base.TestBase):
@ -39,25 +42,19 @@ class DefaultLimitsTest(base.TestBase):
super(DefaultLimitsTest, self).tearDown()
def test_queue_listing(self):
default_queue_paging = 1
# 2 queues to list
self.simulate_put('/v1/queues/q2')
self.assertEqual(self.srmock.status, falcon.HTTP_201)
result = self.simulate_get('/v1/queues')
self.assertEqual(self.srmock.status, falcon.HTTP_200)
with self._prepare_queues(storage.DEFAULT_QUEUES_PER_PAGE + 1):
result = self.simulate_get('/v1/queues')
self.assertEqual(self.srmock.status, falcon.HTTP_200)
queues = json.loads(result[0])['queues']
self.assertEqual(len(queues), default_queue_paging)
self.simulate_delete('/v1/queues/q2')
queues = json.loads(result[0])['queues']
self.assertEqual(len(queues), storage.DEFAULT_QUEUES_PER_PAGE)
def test_message_listing(self):
default_message_paging = 2
# 10 messages to list
self.__prepare_messages(10)
self._prepare_messages(storage.DEFAULT_MESSAGES_PER_PAGE + 1)
result = self.simulate_get(self.messages_path,
headers={'Client-ID': str(uuid.uuid4())})
@ -65,13 +62,10 @@ class DefaultLimitsTest(base.TestBase):
self.assertEqual(self.srmock.status, falcon.HTTP_200)
messages = json.loads(result[0])['messages']
self.assertEqual(len(messages), default_message_paging)
self.assertEqual(len(messages), storage.DEFAULT_MESSAGES_PER_PAGE)
def test_claim_creation(self):
default_message_paging = 2
# 5 messages to claim
self.__prepare_messages(5)
self._prepare_messages(storage.DEFAULT_MESSAGES_PER_CLAIM + 1)
result = self.simulate_post(self.claims_path,
body='{"ttl": 60, "grace": 60}')
@ -79,9 +73,23 @@ class DefaultLimitsTest(base.TestBase):
self.assertEqual(self.srmock.status, falcon.HTTP_201)
messages = json.loads(result[0])
self.assertEqual(len(messages), default_message_paging)
self.assertEqual(len(messages), storage.DEFAULT_MESSAGES_PER_CLAIM)
def __prepare_messages(self, count):
@contextlib.contextmanager
def _prepare_queues(self, count):
queue_paths = ['/v1/queues/multi-{0}'.format(i)
for i in range(count)]
for path in queue_paths:
self.simulate_put(path)
self.assertEqual(self.srmock.status, falcon.HTTP_201)
yield
for path in queue_paths:
self.simulate_delete(path)
def _prepare_messages(self, count):
doc = json.dumps([{'body': 239, 'ttl': 300}] * count)
self.simulate_post(self.messages_path, body=doc,
headers={'Client-ID': 'poster'})