Merge "Remove the pool group totally"

This commit is contained in:
Zuul 2019-03-19 06:51:18 +00:00 committed by Gerrit Code Review
commit ad17a411ce
25 changed files with 159 additions and 2098 deletions

View File

@ -45,7 +45,7 @@ fi
if grep -q 'message_store *= *redis' /etc/zaqar/zaqar.conf; then
redis-cli save
cp /var/lib/redis/dump.rdb $SAVE_DIR/zaqar-redis-message-dump-$BASE_RELEASE.rdb
sudo cp /var/lib/redis/dump.rdb $SAVE_DIR/zaqar-redis-message-dump-$BASE_RELEASE.rdb
fi
# Upgrade Zaqar

View File

@ -229,9 +229,6 @@ class ResponseSchema(api.Api):
'uri': {
'type': 'string'
},
'group': {
'type': ['string', 'null']
},
'flavor': {
'type': ['string', 'null']
},
@ -240,7 +237,7 @@ class ResponseSchema(api.Api):
'additionalProperties': True
}
},
'required': ['href', 'weight', 'uri', 'group'],
'required': ['href', 'weight', 'uri'],
'additionalProperties': False,
},
}
@ -287,9 +284,6 @@ class ResponseSchema(api.Api):
'uri': {
'type': 'string'
},
'group': {
'type': ['string', 'null']
},
'flavor': {
'type': ['string', 'null']
},
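With 'group' dropped from the pool listing schema, only 'href', 'weight' and 'uri' remain required. A minimal sketch of a pool entry that would validate against the trimmed v1.1/v2 schema (the values are illustrative, not taken from the commit):

# Illustrative only: a listed pool entry after this change has no 'group'
# key; 'flavor' stays optional and nullable.
pool_entry = {
    'href': '/v1.1/pools/example-pool',   # hypothetical pool name
    'weight': 100,
    'uri': 'mongodb://127.0.0.1:27017',
    'flavor': None,
}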

View File

@ -230,9 +230,6 @@ class ResponseSchema(api.Api):
'uri': {
'type': 'string'
},
'group': {
'type': ['string', 'null']
},
'flavor': {
'type': ['string', 'null']
},
@ -241,7 +238,7 @@ class ResponseSchema(api.Api):
'additionalProperties': True
}
},
'required': ['href', 'weight', 'uri', 'group'],
'required': ['href', 'weight', 'uri'],
'additionalProperties': False,
},
}
@ -288,9 +285,6 @@ class ResponseSchema(api.Api):
'uri': {
'type': 'string'
},
'group': {
'type': ['string', 'null']
},
'flavor': {
'type': ['string', 'null']
},

View File

@ -27,29 +27,6 @@ patch_capabilities = {
}
}
# TODO(gengchc2): remove pool_group in R release.
# NOTE(flaper87): a string valid
patch_pool = {
'type': 'object',
'properties': {
'pool': {
'type': 'string'
},
'additionalProperties': False
}
}
# TODO(gengchc2): remove pool_group in R release.
patch_pool_group = {
'type': 'object',
'properties': {
'pool_group': {
'type': 'string'
},
'additionalProperties': False
}
}
# NOTE(gengchc): Add pool_list in flavor creation for removing pool_group
patch_pool_list = {
'type': 'object',
@ -64,15 +41,12 @@ patch_pool_list = {
create = {
'type': 'object',
'properties': {
'pool_group': patch_pool_group['properties']['pool_group'],
'pool': patch_pool['properties']['pool'],
'pool_list': patch_pool_list['properties']['pool_list'],
'capabilities': patch_capabilities['properties']['capabilities']
},
# NOTE(flaper87): capabilities need not be present. Storage drivers
# must provide reasonable defaults.
# NOTE(wanghao): remove pool in Newton release.
'oneOf': [{'required': ['pool_group']}, {'required': ['pool']},
{'required': ['pool_list']}],
'oneOf': [{'required': ['pool_list']}],
'additionalProperties': False
}
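Under the reduced 'create' schema, 'pool_list' is now the only accepted way to bind pools to a flavor. A hedged sketch of a flavor-creation body that would validate (pool names are made up):

# Sketch only: 'pool_list' is required; 'capabilities' stays optional
# because storage drivers provide reasonable defaults.
flavor_create_body = {
    'pool_list': ['example-pool-a', 'example-pool-b'],
    'capabilities': {},
}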

View File

@ -38,17 +38,6 @@ patch_uri = {
}
}
patch_group = {
'type': 'object', 'properties': {
'uri': {
'type': 'string',
'minLength': 0,
'maxLength': 255
},
'additionalProperties': False
}
}
# NOTE(gengchc): remove pool_group add flavor
patch_flavor = {
'type': 'object', 'properties': {
@ -73,7 +62,6 @@ patch_weight = {
create = {
'type': 'object', 'properties': {
'weight': patch_weight['properties']['weight'],
'group': patch_group['properties']['uri'],
'flavor': patch_flavor['properties']['flavor'],
'uri': patch_uri['properties']['uri'],
'options': patch_options['properties']['options']

View File

@ -798,7 +798,7 @@ class PoolsBase(ControllerBase):
_list = abc.abstractmethod(lambda x: None)
def create(self, name, weight, uri, group=None, flavor=None, options=None):
def create(self, name, weight, uri, flavor=None, options=None):
"""Registers a pool entry.
:param name: The name of this pool
@ -816,12 +816,10 @@ class PoolsBase(ControllerBase):
flavor_obj = {}
if flavor is not None:
flavor_obj["name"] = flavor
if group is not None:
flavor_obj["pool_group"] = group
if not self._check_capabilities(uri, flavor=flavor_obj):
raise errors.PoolCapabilitiesMismatch()
return self._create(name, weight, uri, group, flavor, options)
return self._create(name, weight, uri, flavor, options)
_create = abc.abstractmethod(lambda x: None)
@ -1020,7 +1018,7 @@ class FlavorsBase(ControllerBase):
raise NotImplementedError
@abc.abstractmethod
def create(self, name, pool_group=None, project=None, capabilities=None):
def create(self, name, project=None, capabilities=None):
"""Registers a flavor entry.
:param name: The name of this flavor
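The abstract base controllers now expose group-free signatures. A minimal usage sketch against hypothetical pools_controller / flavors_controller instances from a configured control driver (only the signatures shown in this change are assumed):

# Sketch only: create the flavor first, then a pool bound to it by name.
flavors_controller.create('durable', project='example-project',
                          capabilities={})
pools_controller.create('example-pool', 100, 'mongodb://127.0.0.1:27017',
                        flavor='durable', options={})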

View File

@ -20,8 +20,6 @@ Schema:
'c': capabilities :: dict
"""
import functools
from zaqar.storage import base
from zaqar.storage import errors
from zaqar.storage.mongodb import utils
@ -60,15 +58,6 @@ class FlavorsController(base.FlavorsBase):
self._pools_ctrl = self.driver.pools_controller
@utils.raises_conn_error
def _list_by_pool_group(self, pool_group, limit=10, detailed=False):
query = {'s': pool_group}
cursor = self._col.find(query, projection=_field_spec(detailed),
limit=limit).sort('n', 1)
normalizer = functools.partial(_normalize, detailed=detailed)
return utils.HookedCursor(cursor, normalizer)
@utils.raises_conn_error
def list(self, project=None, marker=None, limit=10, detailed=False):
query = {'p': project}
@ -97,7 +86,7 @@ class FlavorsController(base.FlavorsBase):
return _normalize(res, detailed)
@utils.raises_conn_error
def create(self, name, pool_group=None, project=None, capabilities=None):
def create(self, name, project=None, capabilities=None):
# NOTE(flaper87): Check if there are pools in this group.
# Should there be a `group_exists` method?
@ -105,15 +94,9 @@ class FlavorsController(base.FlavorsBase):
# so we don't need to get the pool by group.
# NOTE(gengchc2): If you do not use the removal group scheme to
# configure flavor, pool_group can be None..
if pool_group is not None:
flavor_obj = {}
flavor_obj["pool_group"] = pool_group
if not list(self._pools_ctrl.get_pools_by_flavor(flavor_obj)):
raise errors.PoolGroupDoesNotExist(pool_group)
capabilities = {} if capabilities is None else capabilities
self._col.update_one({'n': name, 'p': project},
{'$set': {'s': pool_group, 'c': capabilities}},
{'$set': {'c': capabilities}},
upsert=True)
@utils.raises_conn_error
@ -121,17 +104,15 @@ class FlavorsController(base.FlavorsBase):
return self._col.find_one({'n': name, 'p': project}) is not None
@utils.raises_conn_error
def update(self, name, project=None, pool_group=None, capabilities=None):
def update(self, name, project=None, capabilities=None):
fields = {}
if capabilities is not None:
fields['c'] = capabilities
if pool_group is not None:
fields['s'] = pool_group
# NOTE(gengchc2): If you do not use the removal group scheme to
# configure flavor, pool_group can be None, pool_group can be remove.
assert fields, '`pool_group` or `capabilities` not found in kwargs'
assert fields, '`capabilities` not found in kwargs'
res = self._col.update_one({'n': name, 'p': project},
{'$set': fields},
upsert=False)
@ -152,7 +133,6 @@ class FlavorsController(base.FlavorsBase):
def _normalize(flavor, detailed=False):
ret = {
'name': flavor['n'],
'pool_group': flavor['s'],
}
if detailed:
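With the 's' (pool_group) field gone, a flavor document written by this controller reduces to name, project and capabilities. An illustrative document shape, not taken from the commit:

flavor_doc = {'n': 'durable',            # flavor name
              'p': 'example-project',    # project (hypothetical)
              'c': {'durable': True}}    # capabilities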

View File

@ -24,6 +24,7 @@ Schema:
"""
import functools
from oslo_log import log as logging
from pymongo import errors as mongo_error
from zaqar.common import utils as common_utils
@ -35,6 +36,8 @@ POOLS_INDEX = [
('n', 1)
]
LOG = logging.getLogger(__name__)
URI_INDEX = [
('u', 1)
]
@ -95,8 +98,6 @@ class PoolsController(base.PoolsBase):
query = None
if flavor is None:
query = {'f': None}
elif flavor.get("pool_group") is not None:
query = {'g': flavor.get("pool_group")}
elif flavor.get('name') is not None:
query = {'f': flavor.get('name')}
cursor = self._col.find(query,
@ -105,7 +106,7 @@ class PoolsController(base.PoolsBase):
return utils.HookedCursor(cursor, normalizer)
@utils.raises_conn_error
def _create(self, name, weight, uri, group=None, flavor=None,
def _create(self, name, weight, uri, flavor=None,
options=None):
options = {} if options is None else options
try:
@ -113,11 +114,11 @@ class PoolsController(base.PoolsBase):
{'$set': {'n': name,
'w': weight,
'u': uri,
'g': group,
'f': flavor,
'o': options}},
upsert=True)
except mongo_error.DuplicateKeyError:
except mongo_error.DuplicateKeyError as ex:
LOG.exception(ex)
raise errors.PoolAlreadyExists()
@utils.raises_conn_error
@ -126,19 +127,16 @@ class PoolsController(base.PoolsBase):
@utils.raises_conn_error
def _update(self, name, **kwargs):
names = ('uri', 'weight', 'group', 'flavor', 'options')
names = ('uri', 'weight', 'flavor', 'options')
fields = common_utils.fields(kwargs, names,
pred=lambda x: x is not None,
key_transform=lambda x: x[0])
assert fields, ('`weight`, `uri`, `group`, '
assert fields, ('`weight`, `uri`, '
'or `options` not found in kwargs')
flavor = fields.get('f')
if flavor is not None and len(flavor) == 0:
fields['f'] = None
group = fields.get('g')
if group is not None and len(group) == 0:
fields['g'] = None
res = self._col.update_one({'n': name},
{'$set': fields},
@ -153,20 +151,6 @@ class PoolsController(base.PoolsBase):
# recursion error.
try:
pool = self.get(name)
if pool['group'] is not None:
flavor = {}
flavor['pool_group'] = pool['group']
pools_group = self.get_pools_by_flavor(flavor=flavor)
flavor_ctl = self.driver.flavors_controller
res = list(flavor_ctl._list_by_pool_group(pool['group']))
# NOTE(flaper87): If this is the only pool in the
# group and it's being used by a flavor, don't allow
# it to be deleted.
if res and len(pools_group) == 1:
flavors = ', '.join([x['name'] for x in res])
raise errors.PoolInUseByFlavor(name, flavors)
pools_in_flavor = []
flavor = pool.get("flavor", None)
if flavor is not None:
@ -191,7 +175,6 @@ class PoolsController(base.PoolsBase):
def _normalize(pool, detailed=False):
ret = {
'name': pool['n'],
'group': pool['g'],
'flavor': pool['f'],
'uri': pool['u'],
'weight': pool['w'],
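The corresponding pool document no longer carries a 'g' (group) field; a pool references its flavor directly through 'f'. Illustrative shape with made-up values:

pool_doc = {'n': 'example-pool',                # pool name (hypothetical)
            'w': 100,                           # weight
            'u': 'mongodb://127.0.0.1:27017',   # uri
            'f': 'durable',                     # flavor name, or None
            'o': {}}                            # options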

View File

@ -531,11 +531,9 @@ class Catalog(object):
detailed=True)
pool = select.weighted(pools)
pool = pool and pool['name'] or None
msgtmpl = _(u'register queue to pool: new flavor:%(flavor)s'
' pool_group:%(pool_group)s')
msgtmpl = _(u'register queue to pool: new flavor:%(flavor)s')
LOG.info(msgtmpl,
{'flavor': flavor.get('name', None),
'pool_group': flavor.get('pool_group', None)})
{'flavor': flavor.get('name', None)})
else:
# NOTE(flaper87): Get pools assigned to the default
# group `None`. We should consider adding a `default_group`

View File

@ -72,33 +72,19 @@ class FlavorsController(base.FlavorsBase):
return _normalize(flavor, detailed)
@utils.raises_conn_error
def create(self, name, pool_group=None, project=None, capabilities=None):
def create(self, name, project=None, capabilities=None):
cap = None if capabilities is None else utils.json_encode(capabilities)
try:
if pool_group is not None:
stmt = sa.sql.expression.insert(tables.Flavors).values(
name=name, pool_group=pool_group, project=project,
capabilities=cap
)
else:
stmt = sa.sql.expression.insert(tables.Flavors).values(
name=name, project=project,
capabilities=cap
)
self.driver.run(stmt)
except oslo_db.exception.DBDuplicateEntry:
# NOTE(gengchc2): If you do not use the removal group scheme to
# configure flavor, pool_group can be None..
if pool_group is not None:
flavor_obj = {}
flavor_obj["pool_group"] = pool_group
if not list(self._pools_ctrl.get_pools_by_flavor(flavor_obj)):
raise errors.PoolGroupDoesNotExist(pool_group)
# TODO(flaper87): merge update/create into a single
# method with introduction of upsert
self.update(name, pool_group=pool_group,
self.update(name,
project=project,
capabilities=capabilities)
@ -111,16 +97,13 @@ class FlavorsController(base.FlavorsBase):
return self.driver.run(stmt).fetchone() is not None
@utils.raises_conn_error
def update(self, name, project=None, pool_group=None, capabilities=None):
def update(self, name, project=None, capabilities=None):
fields = {}
if capabilities is not None:
fields['capabilities'] = capabilities
if pool_group is not None:
fields['pool_group'] = pool_group
assert fields, '`pool_group` or `capabilities` not found in kwargs'
assert fields, '`capabilities` not found in kwargs'
if 'capabilities' in fields:
fields['capabilities'] = utils.json_encode(fields['capabilities'])
@ -149,11 +132,10 @@ class FlavorsController(base.FlavorsBase):
def _normalize(flavor, detailed=False):
ret = {
'name': flavor[0],
'pool_group': flavor[2],
}
if detailed:
capabilities = flavor[3]
capabilities = flavor[2]
ret['capabilities'] = (utils.json_decode(capabilities)
if capabilities else {})

View File

@ -0,0 +1,52 @@
# Copyright 2017 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Stein release
Revision ID: 007
Revises: 006
Create Date: 2019-01-09 11:45:45.928605
"""
# revision identifiers, used by Alembic.
revision = '007'
down_revision = '006'
from alembic import op
import sqlalchemy as sa
MYSQL_ENGINE = 'InnoDB'
MYSQL_CHARSET = 'utf8'
def upgrade():
op.drop_constraint(constraint_name='Pools_ibfk_1',
table_name='Pools',
type_='foreignkey')
op.drop_constraint(constraint_name='Flavors_ibfk_1',
table_name='Flavors',
type_='foreignkey')
op.drop_column('Pools', 'group')
op.drop_column('Flavors', 'pool_group')
op.execute('drop table PoolGroup ')
def downgrade():
op.add_column('Pools', sa.Column('group', sa.String(64), nullable=True))
op.add_column('Flavors',
sa.Column('pool_group', sa.String(64), nullable=True))
op.create_table('PoolGroup',
sa.Column('name', sa.String(64), primary_key=True))
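A hedged sketch of applying this revision programmatically with Alembic; the alembic.ini path is an assumption about the local setup, and the usual deployment tooling can be used instead:

from alembic import command
from alembic.config import Config

cfg = Config('alembic.ini')   # assumed location of the Alembic config
command.upgrade(cfg, '007')   # drops Pools.group, Flavors.pool_group and PoolGroup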

View File

@ -38,7 +38,10 @@ class PoolsController(base.PoolsBase):
# TODO(cpp-cabrera): optimization - limit the columns returned
# when detailed=False by specifying them in the select()
# clause
stmt = sa.sql.select([tables.Pools]).where(
stmt = sa.sql.select([tables.Pools.c.name, tables.Pools.c.uri,
tables.Pools.c.weight,
tables.Pools.c.options,
tables.Pools.c.flavor]).where(
tables.Pools.c.name > marker
)
if limit > 0:
@ -57,15 +60,9 @@ class PoolsController(base.PoolsBase):
@utils.raises_conn_error
def _get_pools_by_flavor(self, flavor=None, detailed=False):
pool_group = flavor.get("pool_group", None) if flavor is not None\
else None
flavor_name = flavor.get("name", None) if flavor is not None\
else None
if pool_group is not None:
stmt = sa.sql.select([tables.Pools]).where(
tables.Pools.c.group == pool_group
)
elif flavor_name is not None:
if flavor_name is not None:
stmt = sa.sql.select([tables.Pools]).where(
tables.Pools.c.flavor == flavor_name
)
@ -99,16 +96,13 @@ class PoolsController(base.PoolsBase):
# TODO(cpp-cabrera): rename to upsert
@utils.raises_conn_error
def _create(self, name, weight, uri, group=None, flavor=None,
def _create(self, name, weight, uri, flavor=None,
options=None):
opts = None if options is None else utils.json_encode(options)
if group is not None:
self._ensure_group_exists(group)
try:
stmt = sa.sql.expression.insert(tables.Pools).values(
name=name, weight=weight, uri=uri, group=group,
name=name, weight=weight, uri=uri,
flavor=flavor, options=opts
)
self.driver.run(stmt)
@ -117,7 +111,7 @@ class PoolsController(base.PoolsBase):
# TODO(cpp-cabrera): merge update/create into a single
# method with introduction of upsert
self._update(name, weight=weight, uri=uri,
group=group, flavor=flavor, options=options)
flavor=flavor, options=options)
@utils.raises_conn_error
def _exists(self, name):
@ -131,19 +125,16 @@ class PoolsController(base.PoolsBase):
# NOTE(cpp-cabrera): by pruning None-valued kwargs, we avoid
# overwriting the existing options field with None, since that
# one can be null.
names = ('uri', 'weight', 'group', 'flavor', 'options')
names = ('uri', 'weight', 'flavor', 'options')
fields = common_utils.fields(kwargs, names,
pred=lambda x: x is not None)
assert fields, ('`weight`, `uri`, `group`, `flavor`, '
assert fields, ('`weight`, `uri`, `flavor`, '
'or `options` not found in kwargs')
if 'options' in fields:
fields['options'] = utils.json_encode(fields['options'])
if fields.get('group') is not None:
self._ensure_group_exists(fields.get('group'))
stmt = sa.sql.update(tables.Pools).where(
tables.Pools.c.name == name).values(**fields)
@ -162,20 +153,17 @@ class PoolsController(base.PoolsBase):
def _drop_all(self):
stmt = sa.sql.expression.delete(tables.Pools)
self.driver.run(stmt)
stmt = sa.sql.expression.delete(tables.PoolGroup)
self.driver.run(stmt)
def _normalize(pool, detailed=False):
ret = {
'name': pool[0],
'group': pool[1],
'uri': pool[2],
'weight': pool[3],
'flavor': pool[5],
'uri': pool[1],
'weight': pool[2],
'flavor': pool[4],
}
if detailed:
opts = pool[4]
opts = pool[3]
ret['options'] = utils.json_decode(opts) if opts else {}
return ret
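Because the narrowed select() no longer returns the group column, rows unpack as (name, uri, weight, options, flavor) and the indices in _normalize shift accordingly. A small sketch with a made-up row:

row = ('example-pool', 'mongodb://127.0.0.1:27017', 100, None, 'durable')
normalized = {'name': row[0], 'uri': row[1], 'weight': row[2],
              'flavor': row[4]}   # row[3] holds the JSON-encoded options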

View File

@ -24,15 +24,8 @@ Queues = sa.Table('Queues', metadata,
sa.UniqueConstraint('project', 'name'),
)
PoolGroup = sa.Table('PoolGroup', metadata,
sa.Column('name', sa.String(64), primary_key=True))
Pools = sa.Table('Pools', metadata,
sa.Column('name', sa.String(64), primary_key=True),
sa.Column('group', sa.ForeignKey('PoolGroup.name',
ondelete='CASCADE'),
nullable=True),
sa.Column('uri', sa.String(255),
unique=True, nullable=False),
sa.Column('weight', sa.INTEGER, nullable=False),
@ -45,9 +38,6 @@ Pools = sa.Table('Pools', metadata,
Flavors = sa.Table('Flavors', metadata,
sa.Column('name', sa.String(64), primary_key=True),
sa.Column('project', sa.String(64)),
sa.Column('pool_group', sa.ForeignKey('PoolGroup.name',
ondelete='CASCADE'),
nullable=True),
sa.Column('capabilities', sa.Text()))
Catalogue = sa.Table('Catalogue', metadata,

View File

@ -1529,11 +1529,10 @@ class PoolsControllerTest(ControllerBaseTest):
# Let's create one pool
self.pool = str(uuid.uuid1())
# NOTE(gengchc2): remove pool_group in Rocky release.
self.pool_group = str(uuid.uuid1())
self.pool1 = str(uuid.uuid1())
self.flavor = str(uuid.uuid1())
self.pools_controller.create(self.pool1, 100, 'localhost1',
self.uri = str(uuid.uuid1())
self.pools_controller.create(self.pool1, 100, self.uri,
flavor=self.flavor, options={})
self.flavors_controller = self.driver.flavors_controller
@ -1567,12 +1566,12 @@ class PoolsControllerTest(ControllerBaseTest):
self.assertEqual(xlocation, pool['uri'])
def test_get_returns_expected_content(self):
res = self.pools_controller.get(self.pool)
self._pool_expects(res, self.pool, 100, 'localhost')
res = self.pools_controller.get(self.pool1)
self._pool_expects(res, self.pool1, 100, self.uri)
self.assertNotIn('options', res)
def test_detailed_get_returns_expected_content(self):
res = self.pools_controller.get(self.pool, detailed=True)
res = self.pools_controller.get(self.pool1, detailed=True)
self.assertIn('options', res)
self.assertEqual({}, res['options'])
@ -1581,7 +1580,7 @@ class PoolsControllerTest(ControllerBaseTest):
self.pools_controller.get, 'notexists')
def test_exists(self):
self.assertTrue(self.pools_controller.exists(self.pool))
self.assertTrue(self.pools_controller.exists(self.pool1))
self.assertFalse(self.pools_controller.exists('notexists'))
def test_update_raises_assertion_error_on_bad_fields(self):
@ -1591,19 +1590,20 @@ class PoolsControllerTest(ControllerBaseTest):
def test_update_works(self):
# NOTE(flaper87): This may fail for redis. Create
# a dummy store for tests.
self.pools_controller.update(self.pool, weight=101,
uri='localhost3',
self.uri3 = str(uuid.uuid1())
self.pools_controller.update(self.pool1, weight=101,
uri=self.uri3,
options={'a': 1})
res = self.pools_controller.get(self.pool, detailed=True)
self._pool_expects(res, self.pool, 101, 'localhost3')
res = self.pools_controller.get(self.pool1, detailed=True)
self._pool_expects(res, self.pool1, 101, self.uri3)
self.assertEqual({'a': 1}, res['options'])
def test_delete_works(self):
self.pools_controller.delete(self.pool)
# self.pools_controller.delete(self.pool)
# (gengchc): Remove the flavor from the pool, then the testcase cleans up the pool
self.pools_controller.update(self.pool1, flavor="")
self.pools_controller.delete(self.pool1)
self.assertFalse(self.pools_controller.exists(self.pool))
self.assertFalse(self.pools_controller.exists(self.pool1))
def test_delete_nonexistent_is_silent(self):
self.pools_controller.delete('nonexisting')
@ -1692,15 +1692,21 @@ class CatalogueControllerTest(ControllerBaseTest):
self.project = six.text_type(uuid.uuid4())
self.pool = str(uuid.uuid1())
self.pool_ctrl.create(self.pool, 100, 'localhost', options={})
self.flavor = str(uuid.uuid1())
self.uri = str(uuid.uuid1())
self.uri1 = str(uuid.uuid1())
self.pool_ctrl.create(self.pool, 100, self.uri,
flavor=self.flavor, options={})
self.addCleanup(self.pool_ctrl.delete, self.pool)
self.pool1 = str(uuid.uuid1())
self.flavor = str(uuid.uuid1())
self.pool_ctrl.create(self.pool1, 100, 'localhost1',
options={})
self.pool_ctrl.create(self.pool1, 100, self.uri1,
flavor=self.flavor, options={})
self.addCleanup(self.pool_ctrl.delete, self.pool1)
def tearDown(self):
self.pool_ctrl.update(self.pool, flavor="")
self.pool_ctrl.update(self.pool1, flavor="")
self.pool_ctrl.drop_all()
self.controller.drop_all()
super(CatalogueControllerTest, self).tearDown()
@ -1810,185 +1816,6 @@ class CatalogueControllerTest(ControllerBaseTest):
self.controller.insert(self.project, q2, u'a')
# NOTE(gengchc2): remove FlavorsControllerTest in Rocky release
# and use FlavorsControllerTest1 instead for pool_group removal.
class FlavorsControllerTest(ControllerBaseTest):
"""Flavors Controller base tests.
NOTE(flaper87): Implementations of this class should
override the tearDown method in order
to clean up storage's state.
"""
controller_base_class = storage.FlavorsBase
def setUp(self):
super(FlavorsControllerTest, self).setUp()
self.pools_controller = self.driver.pools_controller
self.flavors_controller = self.driver.flavors_controller
# Let's create one pool
self.pool = str(uuid.uuid1())
self.pool_group = str(uuid.uuid1())
self.pools_controller.create(self.pool, 100, 'localhost',
group=self.pool_group, options={})
self.addCleanup(self.pools_controller.delete, self.pool)
def tearDown(self):
self.flavors_controller.drop_all()
super(FlavorsControllerTest, self).tearDown()
def test_create_succeeds(self):
self.flavors_controller.create('durable', self.pool_group,
project=self.project,
capabilities={})
def _flavors_expects(self, flavor, xname, xproject, xpool):
self.assertIn('name', flavor)
self.assertEqual(xname, flavor['name'])
self.assertNotIn('project', flavor)
self.assertIn('pool_group', flavor)
self.assertEqual(xpool, flavor['pool_group'])
def test_create_replaces_on_duplicate_insert(self):
name = str(uuid.uuid1())
self.flavors_controller.create(name, self.pool_group,
project=self.project,
capabilities={})
pool2 = 'another_pool'
self.pools_controller.create(pool2, 100, 'localhost:27017',
group=pool2, options={})
self.addCleanup(self.pools_controller.delete, pool2)
self.flavors_controller.create(name, pool2,
project=self.project,
capabilities={})
entry = self.flavors_controller.get(name, project=self.project)
self._flavors_expects(entry, name, self.project, pool2)
def test_get_returns_expected_content(self):
name = 'durable'
capabilities = {'fifo': True}
self.flavors_controller.create(name, self.pool_group,
project=self.project,
capabilities=capabilities)
res = self.flavors_controller.get(name, project=self.project)
self._flavors_expects(res, name, self.project, self.pool_group)
self.assertNotIn('capabilities', res)
def test_detailed_get_returns_expected_content(self):
name = 'durable'
capabilities = {'fifo': True}
self.flavors_controller.create(name, self.pool_group,
project=self.project,
capabilities=capabilities)
res = self.flavors_controller.get(name, project=self.project,
detailed=True)
self._flavors_expects(res, name, self.project, self.pool_group)
self.assertIn('capabilities', res)
self.assertEqual(capabilities, res['capabilities'])
def test_get_raises_if_not_found(self):
self.assertRaises(errors.FlavorDoesNotExist,
self.flavors_controller.get, 'notexists')
def test_exists(self):
self.flavors_controller.create('exists', self.pool_group,
project=self.project,
capabilities={})
self.assertTrue(self.flavors_controller.exists('exists',
project=self.project))
self.assertFalse(self.flavors_controller.exists('notexists',
project=self.project))
def test_update_raises_assertion_error_on_bad_fields(self):
self.assertRaises(AssertionError, self.pools_controller.update,
self.pool_group)
def test_update_works(self):
name = 'yummy'
self.flavors_controller.create(name, self.pool_group,
project=self.project,
capabilities={})
res = self.flavors_controller.get(name, project=self.project,
detailed=True)
p = 'olympic'
pool_group = 'sports'
self.pools_controller.create(p, 100, 'localhost2',
group=pool_group, options={})
self.addCleanup(self.pools_controller.delete, p)
new_capabilities = {'fifo': False}
self.flavors_controller.update(name, project=self.project,
pool_group=pool_group,
capabilities={'fifo': False})
res = self.flavors_controller.get(name, project=self.project,
detailed=True)
self._flavors_expects(res, name, self.project, pool_group)
self.assertEqual(new_capabilities, res['capabilities'])
def test_delete_works(self):
name = 'puke'
self.flavors_controller.create(name, self.pool_group,
project=self.project,
capabilities={})
self.flavors_controller.delete(name, project=self.project)
self.assertFalse(self.flavors_controller.exists(name))
def test_delete_nonexistent_is_silent(self):
self.flavors_controller.delete('nonexisting')
def test_drop_all_leads_to_empty_listing(self):
self.flavors_controller.drop_all()
cursor = self.flavors_controller.list()
flavors = next(cursor)
self.assertRaises(StopIteration, next, flavors)
self.assertFalse(next(cursor))
def test_listing_simple(self):
name_gen = lambda i: chr(ord('A') + i)
for i in range(15):
pool = str(i)
pool_group = pool
uri = 'localhost:2701' + pool
self.pools_controller.create(pool, 100, uri,
group=pool_group, options={})
self.addCleanup(self.pools_controller.delete, pool)
self.flavors_controller.create(name_gen(i), project=self.project,
pool_group=pool_group,
capabilities={})
def get_res(**kwargs):
cursor = self.flavors_controller.list(project=self.project,
**kwargs)
res = list(next(cursor))
marker = next(cursor)
self.assertTrue(marker)
return res
res = get_res()
self.assertEqual(10, len(res))
for i, entry in enumerate(res):
self._flavors_expects(entry, name_gen(i), self.project, str(i))
self.assertNotIn('capabilities', entry)
res = get_res(limit=5)
self.assertEqual(5, len(res))
res = get_res(marker=name_gen(3))
self._flavors_expects(res[0], name_gen(4), self.project, '4')
res = get_res(detailed=True)
self.assertEqual(10, len(res))
for i, entry in enumerate(res):
self._flavors_expects(entry, name_gen(i), self.project, str(i))
self.assertIn('capabilities', entry)
self.assertEqual({}, entry['capabilities'])
# NOTE(gengchc2): Unittest for new flavor configure scenario.
class FlavorsControllerTest1(ControllerBaseTest):
"""Flavors Controller base tests.
@ -2007,12 +1834,14 @@ class FlavorsControllerTest1(ControllerBaseTest):
# Let's create one pool
self.pool = str(uuid.uuid1())
self.flavor = 'durable'
self.pools_controller.create(self.pool, 100, 'localhost',
options={})
self.uri = str(uuid.uuid1())
self.pools_controller.create(self.pool, 100, self.uri,
flavor=self.flavor, options={})
self.addCleanup(self.pools_controller.delete, self.pool)
def tearDown(self):
self.pools_controller.update(self.pool, flavor="")
self.pools_controller.drop_all()
self.flavors_controller.drop_all()
super(FlavorsControllerTest1, self).tearDown()
@ -2092,7 +1921,8 @@ class FlavorsControllerTest1(ControllerBaseTest):
p = 'olympic'
flavor = name
self.pools_controller.create(p, 100, 'localhost2',
self.uri2 = str(uuid.uuid1())
self.pools_controller.create(p, 100, self.uri2,
flavor=flavor, options={})
self.addCleanup(self.pools_controller.delete, p)

View File

@ -508,22 +508,16 @@ class MongodbPoolsTests(base.PoolsControllerTest):
def setUp(self):
super(MongodbPoolsTests, self).setUp()
self.pools_controller.create(self.pool, 100, 'localhost',
group=self.pool_group, options={})
self.uri2 = str(uuid.uuid1())
self.flavor2 = str(uuid.uuid1())
self.pools_controller.create(self.pool, 100, self.uri2,
flavor=self.flavor2, options={})
def tearDown(self):
# self.pool_ctrl.update(self.pool, flavor="")
self.pools_controller.drop_all()
super(MongodbPoolsTests, self).tearDown()
# NOTE(gengchc2): remove test_delete_pool_used_by_flavor in Rocky release
# and use test_delete_pool_used_by_flavor1 instead for pool_group removal.
def test_delete_pool_used_by_flavor(self):
self.flavors_controller.create('durable', self.pool_group,
project=self.project,
capabilities={})
with testing.expect(errors.PoolInUseByFlavor):
self.pools_controller.delete(self.pool)
# NOTE(gengchc2): Unittest for new flavor configure scenario.
def test_delete_pool_used_by_flavor1(self):
self.flavors_controller.create(self.flavor,
@ -534,16 +528,6 @@ class MongodbPoolsTests(base.PoolsControllerTest):
with testing.expect(errors.PoolInUseByFlavor):
self.pools_controller.delete(self.pool1)
# NOTE(gengchc2): remove test_mismatching_capabilities_fifo in Rocky
# release and use test_mismatching_capabilities_fifo1 instead for
# pool_group removal.
def test_mismatching_capabilities_fifo(self):
with testing.expect(errors.PoolCapabilitiesMismatch):
self.pools_controller.create(str(uuid.uuid1()),
100, 'mongodb.fifo://localhost',
group=self.pool_group,
options={})
# NOTE(gengchc2): Unittest for new flavor configure scenario.
def test_mismatching_capabilities_fifo1(self):
with testing.expect(errors.PoolCapabilitiesMismatch):
@ -552,17 +536,6 @@ class MongodbPoolsTests(base.PoolsControllerTest):
flavor=self.flavor,
options={})
# NOTE(gengchc2): remove test_mismatching_capabilities in Rocky release
# and use test_mismatching_capabilities1 instead for pool_group removal.
def test_mismatching_capabilities(self):
# NOTE(gengchc2): This test is used for testing mismatching
# capabilities in pool with group
with testing.expect(errors.PoolCapabilitiesMismatch):
self.pools_controller.create(str(uuid.uuid1()),
100, 'redis://localhost',
group=self.pool_group,
options={})
def test_mismatching_capabilities1(self):
# NOTE(gengchc2): This test is used for testing mismatching
# capabilities in pool with flavor
@ -572,21 +545,12 @@ class MongodbPoolsTests(base.PoolsControllerTest):
flavor=self.flavor,
options={})
# NOTE(gengchc2): remove test_duplicate_uri in Rocky release and
# use test_duplicate_uri1 instead for pool_group removal.
def test_duplicate_uri(self):
with testing.expect(errors.PoolAlreadyExists):
# The url 'localhost' is used in setUp(). So reusing the uri
# 'localhost' here will raise PoolAlreadyExists.
self.pools_controller.create(str(uuid.uuid1()), 100, 'localhost',
group=str(uuid.uuid1()), options={})
# NOTE(gengchc2): Unittest for new flavor configure scenario.
def test_duplicate_uri1(self):
with testing.expect(errors.PoolAlreadyExists):
# The url 'localhost' is used in setUp(). So reusing the uri
# 'localhost' here will raise PoolAlreadyExists.
self.pools_controller.create(str(uuid.uuid1()), 100, 'localhost',
self.pools_controller.create(str(uuid.uuid1()), 100, self.uri,
flavor=str(uuid.uuid1()), options={})
@ -633,20 +597,6 @@ class PooledClaimsTests(base.ClaimControllerTest):
self.skip("Fix sqlalchemy driver")
# NOTE(gengchc2): remove MongodbFlavorsTest in Rocky release and
# use MongodbFlavorsTest1 instead for pool_group removal.
@testing.requires_mongodb
class MongodbFlavorsTest(base.FlavorsControllerTest):
driver_class = mongodb.ControlDriver
controller_class = controllers.FlavorsController
control_driver_class = mongodb.ControlDriver
config_file = 'wsgi_mongodb.conf'
def setUp(self):
super(MongodbFlavorsTest, self).setUp()
self.addCleanup(self.controller.drop_all)
# NOTE(gengchc2): Unittest for new flavor configure scenario.
@testing.requires_mongodb
class MongodbFlavorsTest1(base.FlavorsControllerTest1):

View File

@ -45,19 +45,8 @@ class SqlalchemyPoolsTest(DBCreateMixin, base.PoolsControllerTest):
def setUp(self):
super(SqlalchemyPoolsTest, self).setUp()
self.pools_controller.create(self.pool, 100, 'localhost',
group=self.pool_group, options={})
# NOTE(gengchc2): remove test_mismatching_capabilities in Rocky release
# and use test_mismatching_capabilities1 instead for pool_group removal.
def test_mismatching_capabilities(self):
# NOTE(gengchc2): This test is used for testing mismatching
# capabilities in pool with group
with testing.expect(storage.errors.PoolCapabilitiesMismatch):
self.pools_controller.create(str(uuid.uuid1()),
100, 'redis://localhost',
group=self.pool_group,
options={})
# self.pools_controller.create(self.pool, 100, 'localhost',
# group=self.pool_group, options={})
def test_mismatching_capabilities1(self):
# NOTE(gengchc2): This test is used for testing mismatching
@ -76,15 +65,6 @@ class SqlalchemyCatalogueTest(DBCreateMixin, base.CatalogueControllerTest):
control_driver_class = sqlalchemy.ControlDriver
# NOTE(gengchc2): remove SqlalchemyFlavorsTest in Rocky release and
# use SqlalchemyFlavorsTest1 instead for pool_group removal.
class SqlalchemyFlavorsTest(DBCreateMixin, base.FlavorsControllerTest):
config_file = 'wsgi_sqlalchemy.conf'
driver_class = sqlalchemy.ControlDriver
controller_class = controllers.FlavorsController
control_driver_class = sqlalchemy.ControlDriver
# NOTE(gengchc2): Unittest for new flavor configure scenario.
class SqlalchemyFlavorsTest1(DBCreateMixin, base.FlavorsControllerTest1):
config_file = 'wsgi_sqlalchemy.conf'

View File

@ -1,130 +0,0 @@
# Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import mock
import uuid
from zaqar.common import cache as oslo_cache
from zaqar.storage import errors
from zaqar.storage import mongodb
from zaqar.storage import pooling
from zaqar.storage import utils
from zaqar import tests as testing
# TODO(cpp-cabrera): it would be wonderful to refactor this unit test
# so that it could use multiple control storage backends once those
# have pools/catalogue implementations.
@testing.requires_mongodb
class PoolCatalogTest(testing.TestBase):
config_file = 'wsgi_mongodb_pooled_disable_virtual_pool.conf'
def setUp(self):
super(PoolCatalogTest, self).setUp()
oslo_cache.register_config(self.conf)
cache = oslo_cache.get_cache(self.conf)
control = utils.load_storage_driver(self.conf, cache,
control_mode=True)
self.pools_ctrl = control.pools_controller
self.flavors_ctrl = control.flavors_controller
self.catalogue_ctrl = control.catalogue_controller
# NOTE(cpp-cabrera): populate catalogue
self.pool = str(uuid.uuid1())
self.pool2 = str(uuid.uuid1())
self.pool_group = 'pool-group'
self.queue = str(uuid.uuid1())
self.flavor = str(uuid.uuid1())
self.project = str(uuid.uuid1())
# FIXME(therve) This is horrible, we need to manage duplication in a
# nicer way
if 'localhost' in self.mongodb_url:
other_url = self.mongodb_url.replace('localhost', '127.0.0.1')
elif '127.0.0.1' in self.mongodb_url:
other_url = self.mongodb_url.replace('127.0.0.1', 'localhost')
else:
self.skipTest("Can't build a dummy mongo URL.")
self.pools_ctrl.create(self.pool, 100, self.mongodb_url)
self.pools_ctrl.create(self.pool2, 100,
other_url,
group=self.pool_group)
self.catalogue_ctrl.insert(self.project, self.queue, self.pool)
self.catalog = pooling.Catalog(self.conf, cache, control)
self.flavors_ctrl.create(self.flavor, self.pool_group,
project=self.project)
def tearDown(self):
self.catalogue_ctrl.drop_all()
self.pools_ctrl.drop_all()
super(PoolCatalogTest, self).tearDown()
def test_lookup_loads_correct_driver(self):
storage = self.catalog.lookup(self.queue, self.project)
self.assertIsInstance(storage._storage, mongodb.DataDriver)
def test_lookup_returns_default_or_none_if_queue_not_mapped(self):
# Return default
self.assertIsNone(self.catalog.lookup('not', 'mapped'))
self.config(message_store='faulty', group='drivers')
self.config(enable_virtual_pool=True, group='pooling:catalog')
self.assertIsNotNone(self.catalog.lookup('not', 'mapped'))
def test_lookup_returns_none_if_entry_deregistered(self):
self.catalog.deregister(self.queue, self.project)
self.assertIsNone(self.catalog.lookup(self.queue, self.project))
def test_register_leads_to_successful_lookup(self):
self.catalog.register('not_yet', 'mapped')
storage = self.catalog.lookup('not_yet', 'mapped')
self.assertIsInstance(storage._storage, mongodb.DataDriver)
def test_register_with_flavor(self):
queue = 'test'
self.catalog.register(queue, project=self.project,
flavor=self.flavor)
storage = self.catalog.lookup(queue, self.project)
self.assertIsInstance(storage._storage, mongodb.DataDriver)
def test_register_with_fake_flavor(self):
self.assertRaises(errors.FlavorDoesNotExist,
self.catalog.register,
'test', project=self.project,
flavor='fake')
def test_queues_list_on_multi_pools(self):
def fake_list(project=None, kfilter={}, marker=None, limit=10,
detailed=False, name=None):
yield iter([{'name': 'fake_queue'}])
list_str = 'zaqar.storage.mongodb.queues.QueueController.list'
with mock.patch(list_str) as queues_list:
queues_list.side_effect = fake_list
queue_controller = pooling.QueueController(self.catalog)
result = queue_controller.list(project=self.project)
queue_list = list(next(result))
self.assertEqual(1, len(queue_list))
def test_queue_create_with_empty_json_body(self):
queue_controller = pooling.QueueController(self.catalog)
with mock.patch('zaqar.storage.pooling.Catalog.register') as register:
queue_controller.create(self.queue, metadata={},
project=self.project)
register.assert_called_with(self.queue, project=self.project,
flavor=None)

View File

@ -1,341 +0,0 @@
# Copyright (c) 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import contextlib
import uuid
import ddt
import falcon
from oslo_serialization import jsonutils
from zaqar import tests as testing
from zaqar.tests.unit.transport.wsgi import base
@contextlib.contextmanager
def flavor(test, name, pool_group, capabilities={}):
"""A context manager for constructing a flavor for use in testing.
Deletes the flavor after exiting the context.
:param test: Must expose simulate_* methods
:param name: Name for this flavor
:type name: six.text_type
:type pool_group: six.text_type
:type capabilities: dict
:returns: (name, uri, capabilities)
:rtype: see above
"""
doc = {'pool_group': pool_group, 'capabilities': capabilities}
path = test.url_prefix + '/flavors/' + name
test.simulate_put(path, body=jsonutils.dumps(doc))
try:
yield name, pool_group, capabilities
finally:
test.simulate_delete(path)
@contextlib.contextmanager
def flavors(test, count, pool_group):
"""A context manager for constructing flavors for use in testing.
Deletes the flavors after exiting the context.
:param test: Must expose simulate_* methods
:param count: Number of pools to create
:type count: int
:returns: (paths, pool_group, capabilities)
:rtype: ([six.text_type], [six.text_type], [dict])
"""
base = test.url_prefix + '/flavors/'
args = sorted([(base + str(i), {str(i): i}, str(i)) for i in range(count)],
key=lambda tup: tup[2])
for path, capabilities, _ in args:
doc = {'pool_group': pool_group, 'capabilities': capabilities}
test.simulate_put(path, body=jsonutils.dumps(doc))
try:
yield args
finally:
for path, _, _ in args:
test.simulate_delete(path)
@ddt.ddt
class TestFlavorsMongoDB(base.V1_1Base):
config_file = 'wsgi_mongodb_pooled.conf'
@testing.requires_mongodb
def setUp(self):
super(TestFlavorsMongoDB, self).setUp()
self.queue = 'test-queue'
self.queue_path = self.url_prefix + '/queues/' + self.queue
self.pool = 'mypool'
self.pool_group = 'mypool-group'
self.pool_path = self.url_prefix + '/pools/' + self.pool
self.pool_doc = {'weight': 100,
'group': self.pool_group,
'uri': self.mongodb_url}
self.simulate_put(self.pool_path, body=jsonutils.dumps(self.pool_doc))
self.flavor = 'test-flavor'
self.doc = {'capabilities': {}, 'pool_group': self.pool_group}
self.flavor_path = self.url_prefix + '/flavors/' + self.flavor
self.simulate_put(self.flavor_path, body=jsonutils.dumps(self.doc))
self.assertEqual(falcon.HTTP_201, self.srmock.status)
def tearDown(self):
self.simulate_delete(self.queue_path)
self.simulate_delete(self.flavor_path)
self.assertEqual(falcon.HTTP_204, self.srmock.status)
self.simulate_delete(self.pool_path)
super(TestFlavorsMongoDB, self).tearDown()
def test_put_flavor_works(self):
name = str(uuid.uuid1())
with flavor(self, name, self.doc['pool_group']):
self.assertEqual(falcon.HTTP_201, self.srmock.status)
def test_put_raises_if_missing_fields(self):
path = self.url_prefix + '/flavors/' + str(uuid.uuid1())
self.simulate_put(path, body=jsonutils.dumps({}))
self.assertEqual(falcon.HTTP_400, self.srmock.status)
self.simulate_put(path,
body=jsonutils.dumps({'capabilities': {}}))
self.assertEqual(falcon.HTTP_400, self.srmock.status)
@ddt.data(1, 2**32+1, [])
def test_put_raises_if_invalid_pool(self, pool):
path = self.url_prefix + '/flavors/' + str(uuid.uuid1())
self.simulate_put(path,
body=jsonutils.dumps({'pool_group': pool}))
self.assertEqual(falcon.HTTP_400, self.srmock.status)
@ddt.data(-1, 'wee', [])
def test_put_raises_if_invalid_capabilities(self, capabilities):
path = self.url_prefix + '/flavors/' + str(uuid.uuid1())
doc = {'pool_group': 'a', 'capabilities': capabilities}
self.simulate_put(path, body=jsonutils.dumps(doc))
self.assertEqual(falcon.HTTP_400, self.srmock.status)
def test_put_existing_overwrites(self):
# NOTE(cabrera): setUp creates default flavor
expect = self.doc
self.simulate_put(self.flavor_path,
body=jsonutils.dumps(expect))
self.assertEqual(falcon.HTTP_201, self.srmock.status)
result = self.simulate_get(self.flavor_path)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
doc = jsonutils.loads(result[0])
self.assertEqual(expect['pool_group'], doc['pool_group'])
def test_create_flavor_no_pool_group(self):
self.simulate_delete(self.flavor_path)
self.assertEqual(falcon.HTTP_204, self.srmock.status)
self.simulate_delete(self.pool_path)
self.assertEqual(falcon.HTTP_204, self.srmock.status)
resp = self.simulate_put(self.flavor_path,
body=jsonutils.dumps(self.doc))
self.assertEqual(falcon.HTTP_400, self.srmock.status)
self.assertEqual(
{'description': 'Flavor test-flavor could not be created. '
'Pool group mypool-group does not exist',
'title': 'Unable to create'},
jsonutils.loads(resp[0]))
def test_delete_works(self):
self.simulate_delete(self.flavor_path)
self.assertEqual(falcon.HTTP_204, self.srmock.status)
self.simulate_get(self.flavor_path)
self.assertEqual(self.srmock.status, falcon.HTTP_404)
def test_get_nonexisting_raises_404(self):
self.simulate_get(self.url_prefix + '/flavors/nonexisting')
self.assertEqual(self.srmock.status, falcon.HTTP_404)
def _flavor_expect(self, flavor, xhref, xpool):
self.assertIn('href', flavor)
self.assertIn('name', flavor)
self.assertEqual(xhref, flavor['href'])
self.assertIn('pool_group', flavor)
self.assertEqual(xpool, flavor['pool_group'])
def test_get_works(self):
result = self.simulate_get(self.flavor_path)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
flavor = jsonutils.loads(result[0])
self._flavor_expect(flavor, self.flavor_path, self.doc['pool_group'])
def test_detailed_get_works(self):
result = self.simulate_get(self.flavor_path,
query_string='detailed=True')
self.assertEqual(falcon.HTTP_200, self.srmock.status)
flavor = jsonutils.loads(result[0])
self._flavor_expect(flavor, self.flavor_path, self.doc['pool_group'])
self.assertIn('capabilities', flavor)
self.assertEqual({}, flavor['capabilities'])
def test_patch_raises_if_missing_fields(self):
self.simulate_patch(self.flavor_path,
body=jsonutils.dumps({'location': 1}))
self.assertEqual(falcon.HTTP_400, self.srmock.status)
def _patch_test(self, doc):
self.simulate_patch(self.flavor_path,
body=jsonutils.dumps(doc))
self.assertEqual(falcon.HTTP_200, self.srmock.status)
result = self.simulate_get(self.flavor_path,
query_string='detailed=True')
self.assertEqual(falcon.HTTP_200, self.srmock.status)
flavor = jsonutils.loads(result[0])
self._flavor_expect(flavor, self.flavor_path, doc['pool_group'])
self.assertEqual(doc['capabilities'], flavor['capabilities'])
def test_patch_works(self):
doc = {'pool_group': 'my-pool-group', 'capabilities': {'a': 1}}
self._patch_test(doc)
def test_patch_works_with_extra_fields(self):
doc = {'pool_group': 'my-pool-group', 'capabilities': {'a': 1},
'location': 100, 'partition': 'taco'}
self._patch_test(doc)
@ddt.data(-1, 2**32+1, [])
def test_patch_raises_400_on_invalid_pool_group(self, pool_group):
self.simulate_patch(self.flavor_path,
body=jsonutils.dumps({'pool_group': pool_group}))
self.assertEqual(falcon.HTTP_400, self.srmock.status)
@ddt.data(-1, 'wee', [])
def test_patch_raises_400_on_invalid_capabilities(self, capabilities):
doc = {'capabilities': capabilities}
self.simulate_patch(self.flavor_path, body=jsonutils.dumps(doc))
self.assertEqual(falcon.HTTP_400, self.srmock.status)
def test_patch_raises_404_if_flavor_not_found(self):
self.simulate_patch(self.url_prefix + '/flavors/notexists',
body=jsonutils.dumps({'pool_group': 'test'}))
self.assertEqual(self.srmock.status, falcon.HTTP_404)
def test_empty_listing(self):
self.simulate_delete(self.flavor_path)
result = self.simulate_get(self.url_prefix + '/flavors')
results = jsonutils.loads(result[0])
self.assertEqual(falcon.HTTP_200, self.srmock.status)
self.assertEqual(0, len(results['flavors']))
self.assertIn('links', results)
def _listing_test(self, count=10, limit=10,
marker=None, detailed=False):
# NOTE(cpp-cabrera): delete initial flavor - it will interfere
# with listing tests
self.simulate_delete(self.flavor_path)
query = 'limit={0}&detailed={1}'.format(limit, detailed)
if marker:
query += '&marker={2}'.format(marker)
with flavors(self, count, self.doc['pool_group']) as expected:
result = self.simulate_get(self.url_prefix + '/flavors',
query_string=query)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
results = jsonutils.loads(result[0])
self.assertIsInstance(results, dict)
self.assertIn('flavors', results)
self.assertIn('links', results)
flavors_list = results['flavors']
link = results['links'][0]
self.assertEqual('next', link['rel'])
href = falcon.uri.parse_query_string(link['href'].split('?')[1])
self.assertIn('marker', href)
self.assertEqual(str(limit), href['limit'])
self.assertEqual(str(detailed).lower(), href['detailed'])
next_query_string = ('marker={marker}&limit={limit}'
'&detailed={detailed}').format(**href)
next_result = self.simulate_get(link['href'].split('?')[0],
query_string=next_query_string)
next_flavors = jsonutils.loads(next_result[0])
next_flavors_list = next_flavors['flavors']
self.assertEqual(falcon.HTTP_200, self.srmock.status)
self.assertIn('links', next_flavors)
if limit < count:
self.assertEqual(min(limit, count-limit),
len(next_flavors_list))
else:
self.assertEqual(0, len(next_flavors_list))
self.assertEqual(min(limit, count), len(flavors_list))
for i, s in enumerate(flavors_list + next_flavors_list):
expect = expected[i]
path, capabilities = expect[:2]
self._flavor_expect(s, path, self.doc['pool_group'])
if detailed:
self.assertIn('capabilities', s)
self.assertEqual(s['capabilities'], capabilities)
else:
self.assertNotIn('capabilities', s)
def test_listing_works(self):
self._listing_test()
def test_detailed_listing_works(self):
self._listing_test(detailed=True)
@ddt.data(1, 5, 10, 15)
def test_listing_works_with_limit(self, limit):
self._listing_test(count=15, limit=limit)
def test_listing_marker_is_respected(self):
self.simulate_delete(self.flavor_path)
with flavors(self, 10, self.doc['pool_group']) as expected:
result = self.simulate_get(self.url_prefix + '/flavors',
query_string='marker=3')
self.assertEqual(falcon.HTTP_200, self.srmock.status)
flavor_list = jsonutils.loads(result[0])['flavors']
self.assertEqual(6, len(flavor_list))
path, capabilities = expected[4][:2]
self._flavor_expect(flavor_list[0], path, self.doc['pool_group'])
def test_queue_create_works(self):
metadata = {'_flavor': self.flavor}
self.simulate_put(self.queue_path, body=jsonutils.dumps(metadata))
self.assertEqual(falcon.HTTP_201, self.srmock.status)
def test_queue_create_no_flavor(self):
metadata = {'_flavor': self.flavor}
self.simulate_delete(self.flavor_path)
self.assertEqual(falcon.HTTP_204, self.srmock.status)
self.simulate_put(self.queue_path, body=jsonutils.dumps(metadata))
self.assertEqual(falcon.HTTP_400, self.srmock.status)

View File

@ -1,354 +0,0 @@
# Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import contextlib
import ddt
import falcon
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from zaqar import tests as testing
from zaqar.tests.unit.transport.wsgi import base
@contextlib.contextmanager
def pool(test, name, weight, uri, group=None, options={}):
"""A context manager for constructing a pool for use in testing.
Deletes the pool after exiting the context.
:param test: Must expose simulate_* methods
:param name: Name for this pool
:type name: six.text_type
:type weight: int
:type uri: six.text_type
:type options: dict
:returns: (name, weight, uri, options)
:rtype: see above
"""
uri = "%s/%s" % (uri, uuidutils.generate_uuid())
doc = {'weight': weight, 'uri': uri,
'group': group, 'options': options}
path = test.url_prefix + '/pools/' + name
test.simulate_put(path, body=jsonutils.dumps(doc))
test.addCleanup(test.simulate_delete, path)
try:
yield name, weight, uri, group, options
finally:
test.simulate_delete(path)
@contextlib.contextmanager
def pools(test, count, uri, group):
"""A context manager for constructing pools for use in testing.
Deletes the pools after exiting the context.
:param test: Must expose simulate_* methods
:param count: Number of pools to create
:type count: int
:returns: (paths, weights, uris, options)
:rtype: ([six.text_type], [int], [six.text_type], [dict])
"""
mongo_url = uri
base = test.url_prefix + '/pools/'
args = [(base + str(i), i,
{str(i): i})
for i in range(count)]
for path, weight, option in args:
uri = "%s/%s" % (mongo_url, uuidutils.generate_uuid())
doc = {'weight': weight, 'uri': uri,
'group': group, 'options': option}
test.simulate_put(path, body=jsonutils.dumps(doc))
try:
yield args
finally:
for path, _, _ in args:
test.simulate_delete(path)
@ddt.ddt
class TestPoolsMongoDB(base.V1_1Base):
config_file = 'wsgi_mongodb_pooled.conf'
@testing.requires_mongodb
def setUp(self):
super(TestPoolsMongoDB, self).setUp()
self.doc = {'weight': 100,
'group': 'mygroup',
'uri': self.mongodb_url}
self.pool = self.url_prefix + '/pools/' + uuidutils.generate_uuid()
self.simulate_put(self.pool, body=jsonutils.dumps(self.doc))
self.assertEqual(falcon.HTTP_201, self.srmock.status)
def tearDown(self):
super(TestPoolsMongoDB, self).tearDown()
self.simulate_delete(self.pool)
self.assertEqual(falcon.HTTP_204, self.srmock.status)
def test_put_pool_works(self):
name = uuidutils.generate_uuid()
weight, uri = self.doc['weight'], self.doc['uri']
with pool(self, name, weight, uri, group='my-group'):
self.assertEqual(falcon.HTTP_201, self.srmock.status)
def test_put_raises_if_missing_fields(self):
path = self.url_prefix + '/pools/' + uuidutils.generate_uuid()
self.simulate_put(path, body=jsonutils.dumps({'weight': 100}))
self.assertEqual(falcon.HTTP_400, self.srmock.status)
self.simulate_put(path,
body=jsonutils.dumps(
{'uri': self.mongodb_url}))
self.assertEqual(falcon.HTTP_400, self.srmock.status)
@ddt.data(-1, 2**32+1, 'big')
def test_put_raises_if_invalid_weight(self, weight):
path = self.url_prefix + '/pools/' + uuidutils.generate_uuid()
doc = {'weight': weight, 'uri': 'a'}
self.simulate_put(path,
body=jsonutils.dumps(doc))
self.assertEqual(falcon.HTTP_400, self.srmock.status)
@ddt.data(-1, 2**32+1, [], 'localhost:27017')
def test_put_raises_if_invalid_uri(self, uri):
path = self.url_prefix + '/pools/' + uuidutils.generate_uuid()
self.simulate_put(path,
body=jsonutils.dumps({'weight': 1, 'uri': uri}))
self.assertEqual(falcon.HTTP_400, self.srmock.status)
@ddt.data(-1, 'wee', [])
def test_put_raises_if_invalid_options(self, options):
path = self.url_prefix + '/pools/' + uuidutils.generate_uuid()
doc = {'weight': 1, 'uri': 'a', 'options': options}
self.simulate_put(path, body=jsonutils.dumps(doc))
self.assertEqual(falcon.HTTP_400, self.srmock.status)
def test_put_existing_overwrites(self):
# NOTE(cabrera): setUp creates default pool
expect = self.doc
self.simulate_put(self.pool,
body=jsonutils.dumps(expect))
self.assertEqual(falcon.HTTP_201, self.srmock.status)
result = self.simulate_get(self.pool)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
doc = jsonutils.loads(result[0])
self.assertEqual(expect['weight'], doc['weight'])
self.assertEqual(expect['uri'], doc['uri'])
def test_put_capabilities_mismatch_pool(self):
mongodb_doc = self.doc
self.simulate_put(self.pool,
body=jsonutils.dumps(mongodb_doc))
self.assertEqual(falcon.HTTP_201, self.srmock.status)
redis_doc = {'weight': 100,
'group': 'mygroup',
'uri': 'redis://127.0.0.1:6379'}
self.simulate_put(self.pool,
body=jsonutils.dumps(redis_doc))
self.assertEqual(falcon.HTTP_400, self.srmock.status)
def test_delete_works(self):
self.simulate_delete(self.pool)
self.assertEqual(falcon.HTTP_204, self.srmock.status)
self.simulate_get(self.pool)
self.assertEqual(falcon.HTTP_404, self.srmock.status)
def test_get_nonexisting_raises_404(self):
self.simulate_get(self.url_prefix + '/pools/nonexisting')
self.assertEqual(falcon.HTTP_404, self.srmock.status)
def _pool_expect(self, pool, xhref, xweight, xuri):
self.assertIn('href', pool)
self.assertIn('name', pool)
self.assertEqual(xhref, pool['href'])
self.assertIn('weight', pool)
self.assertEqual(xweight, pool['weight'])
self.assertIn('uri', pool)
# NOTE(dynarro): we are using startwith because we are adding to
# pools UUIDs, to avoid dupplications
self.assertTrue(pool['uri'].startswith(xuri))
def test_get_works(self):
result = self.simulate_get(self.pool)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
pool = jsonutils.loads(result[0])
self._pool_expect(pool, self.pool, self.doc['weight'],
self.doc['uri'])
def test_detailed_get_works(self):
result = self.simulate_get(self.pool,
query_string='detailed=True')
self.assertEqual(falcon.HTTP_200, self.srmock.status)
pool = jsonutils.loads(result[0])
self._pool_expect(pool, self.pool, self.doc['weight'],
self.doc['uri'])
self.assertIn('options', pool)
self.assertEqual({}, pool['options'])
def test_patch_raises_if_missing_fields(self):
self.simulate_patch(self.pool,
body=jsonutils.dumps({'location': 1}))
self.assertEqual(falcon.HTTP_400, self.srmock.status)
def _patch_test(self, doc):
self.simulate_patch(self.pool,
body=jsonutils.dumps(doc))
self.assertEqual(falcon.HTTP_200, self.srmock.status)
result = self.simulate_get(self.pool,
query_string='detailed=True')
self.assertEqual(falcon.HTTP_200, self.srmock.status)
pool = jsonutils.loads(result[0])
self._pool_expect(pool, self.pool, doc['weight'],
doc['uri'])
self.assertEqual(doc['options'], pool['options'])
def test_patch_works(self):
doc = {'weight': 101,
'uri': self.mongodb_url,
'options': {'a': 1}}
self._patch_test(doc)
def test_patch_works_with_extra_fields(self):
doc = {'weight': 101,
'uri': self.mongodb_url,
'options': {'a': 1},
'location': 100, 'partition': 'taco'}
self._patch_test(doc)
@ddt.data(-1, 2**32+1, 'big')
def test_patch_raises_400_on_invalid_weight(self, weight):
self.simulate_patch(self.pool,
body=jsonutils.dumps({'weight': weight}))
self.assertEqual(falcon.HTTP_400, self.srmock.status)
@ddt.data(-1, 2**32+1, [], 'localhost:27017')
def test_patch_raises_400_on_invalid_uri(self, uri):
self.simulate_patch(self.pool,
body=jsonutils.dumps({'uri': uri}))
self.assertEqual(falcon.HTTP_400, self.srmock.status)
@ddt.data(-1, 'wee', [])
def test_patch_raises_400_on_invalid_options(self, options):
self.simulate_patch(self.pool,
body=jsonutils.dumps({'options': options}))
self.assertEqual(falcon.HTTP_400, self.srmock.status)
def test_patch_raises_404_if_pool_not_found(self):
self.simulate_patch(self.url_prefix + '/pools/notexists',
body=jsonutils.dumps({'weight': 1}))
self.assertEqual(falcon.HTTP_404, self.srmock.status)
def test_empty_listing(self):
self.simulate_delete(self.pool)
result = self.simulate_get(self.url_prefix + '/pools')
results = jsonutils.loads(result[0])
self.assertEqual(falcon.HTTP_200, self.srmock.status)
self.assertEqual(0, len(results['pools']))
self.assertIn('links', results)
def _listing_test(self, count=10, limit=10,
marker=None, detailed=False):
# NOTE(cpp-cabrera): delete initial pool - it will interfere
# with listing tests
self.simulate_delete(self.pool)
query = 'limit={0}&detailed={1}'.format(limit, detailed)
if marker:
query += '&marker={0}'.format(marker)
with pools(self, count, self.doc['uri'], 'my-group') as expected:
result = self.simulate_get(self.url_prefix + '/pools',
query_string=query)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
results = jsonutils.loads(result[0])
self.assertIsInstance(results, dict)
self.assertIn('pools', results)
self.assertIn('links', results)
pool_list = results['pools']
link = results['links'][0]
self.assertEqual('next', link['rel'])
href = falcon.uri.parse_query_string(link['href'].split('?')[1])
self.assertIn('marker', href)
self.assertEqual(str(limit), href['limit'])
self.assertEqual(str(detailed).lower(), href['detailed'])
next_query_string = ('marker={marker}&limit={limit}'
'&detailed={detailed}').format(**href)
next_result = self.simulate_get(link['href'].split('?')[0],
query_string=next_query_string)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
next_pool = jsonutils.loads(next_result[0])
next_pool_list = next_pool['pools']
self.assertIn('links', next_pool)
if limit < count:
self.assertEqual(min(limit, count-limit),
len(next_pool_list))
else:
# NOTE(jeffrey4l): when limit >= count, there will be no
# pools in the 2nd page.
self.assertEqual(0, len(next_pool_list))
self.assertEqual(min(limit, count), len(pool_list))
for s in pool_list + next_pool_list:
# NOTE(flwang): It can't be assumed that sqlalchemy and mongodb
# return query results in the same order, i.e. the order in which
# the pools were inserted; sqlalchemy in particular can't
# guarantee that. So we leverage the relationship between the
# pool weight and the index of the pools fixture to get the
# right pool to verify.
expect = expected[s['weight']]
path, weight, group = expect[:3]
self._pool_expect(s, path, weight, self.doc['uri'])
if detailed:
self.assertIn('options', s)
self.assertEqual(s['options'], expect[-1])
else:
self.assertNotIn('options', s)
def test_listing_works(self):
self._listing_test()
def test_detailed_listing_works(self):
self._listing_test(detailed=True)
@ddt.data(1, 5, 10, 15)
def test_listing_works_with_limit(self, limit):
self._listing_test(count=15, limit=limit)
def test_listing_marker_is_respected(self):
self.simulate_delete(self.pool)
with pools(self, 10, self.doc['uri'], 'my-group') as expected:
result = self.simulate_get(self.url_prefix + '/pools',
query_string='marker=3')
self.assertEqual(falcon.HTTP_200, self.srmock.status)
pool_list = jsonutils.loads(result[0])['pools']
self.assertEqual(6, len(pool_list))
path, weight = expected[4][:2]
self._pool_expect(pool_list[0], path, weight, self.doc['uri'])
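For orientation, a minimal sketch (not part of the patch) of the pool document shape once `group` is removed: only `weight`, `uri`, optional `options`, and, in the newer API, an optional `flavor` reference are accepted. Values below are illustrative.
# Illustrative only: a group-less pool document, mirroring the request
# bodies used by these tests (hypothetical values).
pool_doc = {
    'weight': 100,
    'uri': 'mongodb://127.0.0.1:27017',
    'options': {},
    # Newer API only: a pool may reference a flavor directly instead
    # of a group.
    'flavor': 'my-flavor',
}
# e.g. self.simulate_put(pool_path, body=jsonutils.dumps(pool_doc))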

View File

@ -1,350 +0,0 @@
# Copyright (c) 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import contextlib
import uuid
import ddt
import falcon
from oslo_serialization import jsonutils
from zaqar import tests as testing
from zaqar.tests.unit.transport.wsgi import base
# NOTE(gengchc2): remove pool_group in Rocky release.
@contextlib.contextmanager
def flavor(test, name, pool_group):
"""A context manager for constructing a flavor for use in testing.
Deletes the flavor after exiting the context.
:param test: Must expose simulate_* methods
:param name: Name for this flavor
:type name: six.text_type
:type pool_group: six.text_type
:returns: (name, pool_group)
:rtype: see above
"""
doc = {'pool_group': pool_group}
path = test.url_prefix + '/flavors/' + name
test.simulate_put(path, body=jsonutils.dumps(doc))
try:
yield name, pool_group
finally:
test.simulate_delete(path)
@contextlib.contextmanager
def flavors(test, count, pool_group):
"""A context manager for constructing flavors for use in testing.
Deletes the flavors after exiting the context.
:param test: Must expose simulate_* methods
:param count: Number of flavors to create
:type count: int
:returns: (paths, pool_group, capabilities)
:rtype: ([six.text_type], [six.text_type], [dict])
"""
base = test.url_prefix + '/flavors/'
args = sorted([(base + str(i), str(i)) for i in range(count)],
key=lambda tup: tup[1])
for path, _ in args:
doc = {'pool_group': pool_group}
test.simulate_put(path, body=jsonutils.dumps(doc))
try:
yield args
finally:
for path, _ in args:
test.simulate_delete(path)
@ddt.ddt
class TestFlavorsMongoDB(base.V2Base):
config_file = 'wsgi_mongodb_pooled.conf'
@testing.requires_mongodb
def setUp(self):
super(TestFlavorsMongoDB, self).setUp()
self.queue = 'test-queue'
self.queue_path = self.url_prefix + '/queues/' + self.queue
self.pool = 'mypool'
self.pool_group = 'mypool-group'
self.pool_path = self.url_prefix + '/pools/' + self.pool
self.pool_doc = {'weight': 100,
'group': self.pool_group,
'uri': self.mongodb_url + '/test'}
self.simulate_put(self.pool_path, body=jsonutils.dumps(self.pool_doc))
self.flavor = 'test-flavor'
self.doc = {'capabilities': {}, 'pool_group': self.pool_group}
self.flavor_path = self.url_prefix + '/flavors/' + self.flavor
self.simulate_put(self.flavor_path, body=jsonutils.dumps(self.doc))
self.assertEqual(falcon.HTTP_201, self.srmock.status)
def tearDown(self):
self.simulate_delete(self.queue_path)
self.simulate_delete(self.flavor_path)
self.assertEqual(falcon.HTTP_204, self.srmock.status)
self.simulate_delete(self.pool_path)
super(TestFlavorsMongoDB, self).tearDown()
def test_put_flavor_works(self):
name = str(uuid.uuid1())
with flavor(self, name, self.doc['pool_group']):
self.assertEqual(falcon.HTTP_201, self.srmock.status)
def test_put_raises_if_missing_fields(self):
path = self.url_prefix + '/flavors/' + str(uuid.uuid1())
self.simulate_put(path, body=jsonutils.dumps({}))
self.assertEqual(falcon.HTTP_400, self.srmock.status)
self.simulate_put(path,
body=jsonutils.dumps({'capabilities': {}}))
self.assertEqual(falcon.HTTP_400, self.srmock.status)
@ddt.data(1, 2**32+1, [])
def test_put_raises_if_invalid_pool(self, pool_group):
path = self.url_prefix + '/flavors/' + str(uuid.uuid1())
self.simulate_put(path,
body=jsonutils.dumps({'pool_group': pool_group}))
self.assertEqual(falcon.HTTP_400, self.srmock.status)
def test_put_auto_get_capabilities(self):
path = self.url_prefix + '/flavors/' + str(uuid.uuid1())
doc = {'pool_group': self.pool_group}
self.simulate_put(path, body=jsonutils.dumps(doc))
self.assertEqual(falcon.HTTP_201, self.srmock.status)
# NOTE(gengchc2): Delete it; otherwise a garbage flavor is left behind.
self.simulate_delete(path)
def test_put_existing_overwrites(self):
# NOTE(cabrera): setUp creates default flavor
expect = self.doc
self.simulate_put(self.flavor_path,
body=jsonutils.dumps(expect))
self.assertEqual(falcon.HTTP_201, self.srmock.status)
result = self.simulate_get(self.flavor_path)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
doc = jsonutils.loads(result[0])
self.assertEqual(expect['pool_group'], doc['pool_group'])
def test_create_flavor_no_pool_group(self):
self.simulate_delete(self.flavor_path)
self.assertEqual(falcon.HTTP_204, self.srmock.status)
self.simulate_delete(self.pool_path)
self.assertEqual(falcon.HTTP_204, self.srmock.status)
resp = self.simulate_put(self.flavor_path,
body=jsonutils.dumps(self.doc))
self.assertEqual(falcon.HTTP_400, self.srmock.status)
self.assertEqual(
{'description': 'Flavor test-flavor could not be created. '
'Pool group mypool-group does not exist',
'title': 'Unable to create'},
jsonutils.loads(resp[0]))
def test_delete_works(self):
self.simulate_delete(self.flavor_path)
self.assertEqual(falcon.HTTP_204, self.srmock.status)
self.simulate_get(self.flavor_path)
self.assertEqual(falcon.HTTP_404, self.srmock.status)
def test_get_nonexisting_raises_404(self):
self.simulate_get(self.url_prefix + '/flavors/nonexisting')
self.assertEqual(falcon.HTTP_404, self.srmock.status)
def _flavor_expect(self, flavor, xhref, xpool_group):
self.assertIn('href', flavor)
self.assertIn('name', flavor)
self.assertEqual(xhref, flavor['href'])
self.assertIn('pool_group', flavor)
self.assertEqual(xpool_group, flavor['pool_group'])
def test_get_works(self):
result = self.simulate_get(self.flavor_path)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
flavor = jsonutils.loads(result[0])
self._flavor_expect(flavor, self.flavor_path, self.doc['pool_group'])
store_caps = ['FIFO', 'CLAIMS', 'DURABILITY',
'AOD', 'HIGH_THROUGHPUT']
self.assertEqual(store_caps, flavor['capabilities'])
def test_patch_raises_if_missing_fields(self):
self.simulate_patch(self.flavor_path,
body=jsonutils.dumps({'location': 1}))
self.assertEqual(falcon.HTTP_400, self.srmock.status)
def _patch_test(self, doc):
result = self.simulate_patch(self.flavor_path,
body=jsonutils.dumps(doc))
self.assertEqual(falcon.HTTP_200, self.srmock.status)
updated_flavor = jsonutils.loads(result[0])
self._flavor_expect(updated_flavor, self.flavor_path,
doc['pool_group'])
self.assertEqual(doc['capabilities'], updated_flavor['capabilities'])
result = self.simulate_get(self.flavor_path)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
flavor = jsonutils.loads(result[0])
self._flavor_expect(flavor, self.flavor_path, doc['pool_group'])
self.assertEqual(doc['capabilities'], flavor['capabilities'])
def test_patch_works(self):
doc = {'pool_group': 'mypoolgroup', 'capabilities': []}
self._patch_test(doc)
def test_patch_works_with_extra_fields(self):
doc = {'pool_group': 'mypoolgroup', 'capabilities': [],
'location': 100, 'partition': 'taco'}
self._patch_test(doc)
@ddt.data(-1, 2**32+1, [])
def test_patch_raises_400_on_invalid_pool_group(self, pool_group):
self.simulate_patch(self.flavor_path,
body=jsonutils.dumps({'pool_group': pool_group}))
self.assertEqual(falcon.HTTP_400, self.srmock.status)
@ddt.data(-1, 'wee', [])
def test_patch_raises_400_on_invalid_capabilities(self, capabilities):
doc = {'capabilities': capabilities}
self.simulate_patch(self.flavor_path, body=jsonutils.dumps(doc))
self.assertEqual(falcon.HTTP_400, self.srmock.status)
def test_patch_raises_404_if_flavor_not_found(self):
self.simulate_patch(self.url_prefix + '/flavors/notexists',
body=jsonutils.dumps({'pool_group': 'test'}))
self.assertEqual(falcon.HTTP_404, self.srmock.status)
def test_empty_listing(self):
self.simulate_delete(self.flavor_path)
result = self.simulate_get(self.url_prefix + '/flavors')
results = jsonutils.loads(result[0])
self.assertEqual(falcon.HTTP_200, self.srmock.status)
self.assertEqual(0, len(results['flavors']))
self.assertIn('links', results)
def _listing_test(self, count=10, limit=10,
marker=None, detailed=False):
# NOTE(cpp-cabrera): delete initial flavor - it will interfere
# with listing tests
self.simulate_delete(self.flavor_path)
query = 'limit={0}&detailed={1}'.format(limit, detailed)
if marker:
query += '&marker={0}'.format(marker)
with flavors(self, count, self.doc['pool_group']) as expected:
result = self.simulate_get(self.url_prefix + '/flavors',
query_string=query)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
results = jsonutils.loads(result[0])
self.assertIsInstance(results, dict)
self.assertIn('flavors', results)
self.assertIn('links', results)
flavors_list = results['flavors']
link = results['links'][0]
self.assertEqual('next', link['rel'])
href = falcon.uri.parse_query_string(link['href'].split('?')[1])
self.assertIn('marker', href)
self.assertEqual(str(limit), href['limit'])
self.assertEqual(str(detailed).lower(), href['detailed'])
next_query_string = ('marker={marker}&limit={limit}'
'&detailed={detailed}').format(**href)
next_result = self.simulate_get(link['href'].split('?')[0],
query_string=next_query_string)
next_flavors = jsonutils.loads(next_result[0])
next_flavors_list = next_flavors['flavors']
self.assertEqual(falcon.HTTP_200, self.srmock.status)
self.assertIn('links', next_flavors)
if limit < count:
self.assertEqual(min(limit, count-limit),
len(next_flavors_list))
else:
self.assertEqual(0, len(next_flavors_list))
self.assertEqual(min(limit, count), len(flavors_list))
for i, s in enumerate(flavors_list + next_flavors_list):
expect = expected[i]
path = expect[0]
capabilities = ['FIFO', 'CLAIMS', 'DURABILITY',
'AOD', 'HIGH_THROUGHPUT']
self._flavor_expect(s, path, self.doc['pool_group'])
if detailed:
self.assertIn('capabilities', s)
self.assertEqual(s['capabilities'], capabilities)
else:
self.assertNotIn('capabilities', s)
def test_listing_works(self):
self._listing_test()
def test_detailed_listing_works(self):
self._listing_test(detailed=True)
@ddt.data(1, 5, 10, 15)
def test_listing_works_with_limit(self, limit):
self._listing_test(count=15, limit=limit)
def test_listing_marker_is_respected(self):
self.simulate_delete(self.flavor_path)
with flavors(self, 10, self.doc['pool_group']) as expected:
result = self.simulate_get(self.url_prefix + '/flavors',
query_string='marker=3')
self.assertEqual(falcon.HTTP_200, self.srmock.status)
flavor_list = jsonutils.loads(result[0])['flavors']
self.assertEqual(6, len(flavor_list))
path, capabilities = expected[4][:2]
self._flavor_expect(flavor_list[0], path, self.doc['pool_group'])
def test_listing_error_with_invalid_limit(self):
self.simulate_delete(self.flavor_path)
query = 'limit={0}&detailed={1}'.format(0, True)
with flavors(self, 10, self.doc['pool_group']):
self.simulate_get(self.url_prefix + '/flavors', query_string=query)
self.assertEqual(falcon.HTTP_400, self.srmock.status)
def test_queue_create_works(self):
metadata = {'_flavor': self.flavor}
self.simulate_put(self.queue_path, body=jsonutils.dumps(metadata))
self.assertEqual(falcon.HTTP_201, self.srmock.status)
def test_queue_create_no_flavor(self):
metadata = {'_flavor': self.flavor}
self.simulate_delete(self.flavor_path)
self.assertEqual(falcon.HTTP_204, self.srmock.status)
self.simulate_put(self.queue_path, body=jsonutils.dumps(metadata))
self.assertEqual(falcon.HTTP_400, self.srmock.status)
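The flavor tests above are removed because a flavor no longer references a pool group; under the new schema it is bound to an explicit list of pools instead. A rough before/after sketch with illustrative values:
# Removed style: flavor bound to a pool group.
old_doc = {'pool_group': 'mypool-group', 'capabilities': {}}

# New style: flavor bound to an explicit list of pool names.
new_doc = {'pool_list': ['mypool'], 'capabilities': {}}
# e.g. self.simulate_put(flavor_path, body=jsonutils.dumps(new_doc))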

View File

@ -1,373 +0,0 @@
# Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import contextlib
import ddt
import falcon
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from zaqar import tests as testing
from zaqar.tests.unit.transport.wsgi import base
# NOTE(gengchc2): remove pool_group in Rocky release.
@contextlib.contextmanager
def pool(test, name, weight, uri, group=None, options={}):
"""A context manager for constructing a pool for use in testing.
Deletes the pool after exiting the context.
:param test: Must expose simulate_* methods
:param name: Name for this pool
:type name: six.text_type
:type weight: int
:type uri: six.text_type
:type options: dict
:returns: (name, weight, uri, options)
:rtype: see above
"""
uri = "%s/%s" % (uri, uuidutils.generate_uuid())
doc = {'weight': weight, 'uri': uri,
'group': group, 'options': options}
path = test.url_prefix + '/pools/' + name
test.simulate_put(path, body=jsonutils.dumps(doc))
try:
yield name, weight, uri, group, options
finally:
test.simulate_delete(path)
@contextlib.contextmanager
def pools(test, count, uri, group):
"""A context manager for constructing pools for use in testing.
Deletes the pools after exiting the context.
:param test: Must expose simulate_* methods
:param count: Number of pools to create
:type count: int
:returns: (paths, weights, uris, options)
:rtype: ([six.text_type], [int], [six.text_type], [dict])
"""
mongo_url = uri
base = test.url_prefix + '/pools/'
args = [(base + str(i), i,
{str(i): i})
for i in range(count)]
for path, weight, option in args:
uri = "%s/%s" % (mongo_url, uuidutils.generate_uuid())
doc = {'weight': weight, 'uri': uri,
'group': group, 'options': option}
test.simulate_put(path, body=jsonutils.dumps(doc))
try:
yield args
finally:
for path, _, _ in args:
test.simulate_delete(path)
@ddt.ddt
class TestPoolsMongoDB(base.V2Base):
config_file = 'wsgi_mongodb_pooled.conf'
@testing.requires_mongodb
def setUp(self):
super(TestPoolsMongoDB, self).setUp()
self.doc = {'weight': 100,
'group': 'mygroup',
'uri': self.mongodb_url}
self.pool = self.url_prefix + '/pools/' + uuidutils.generate_uuid()
self.simulate_put(self.pool, body=jsonutils.dumps(self.doc))
self.assertEqual(falcon.HTTP_201, self.srmock.status)
def tearDown(self):
super(TestPoolsMongoDB, self).tearDown()
self.simulate_delete(self.pool)
self.assertEqual(falcon.HTTP_204, self.srmock.status)
def test_put_pool_works(self):
name = uuidutils.generate_uuid()
weight, uri = self.doc['weight'], self.doc['uri']
with pool(self, name, weight, uri, group='my-group'):
self.assertEqual(falcon.HTTP_201, self.srmock.status)
def test_put_raises_if_missing_fields(self):
path = self.url_prefix + '/pools/' + uuidutils.generate_uuid()
self.simulate_put(path, body=jsonutils.dumps({'weight': 100}))
self.assertEqual(falcon.HTTP_400, self.srmock.status)
self.simulate_put(path,
body=jsonutils.dumps(
{'uri': self.mongodb_url}))
self.assertEqual(falcon.HTTP_400, self.srmock.status)
@ddt.data(-1, 2**32+1, 'big')
def test_put_raises_if_invalid_weight(self, weight):
path = self.url_prefix + '/pools/' + uuidutils.generate_uuid()
doc = {'weight': weight, 'uri': 'a'}
self.simulate_put(path,
body=jsonutils.dumps(doc))
self.assertEqual(falcon.HTTP_400, self.srmock.status)
@ddt.data(-1, 2**32+1, [], 'localhost:27017')
def test_put_raises_if_invalid_uri(self, uri):
path = self.url_prefix + '/pools/' + uuidutils.generate_uuid()
self.simulate_put(path,
body=jsonutils.dumps({'weight': 1, 'uri': uri}))
self.assertEqual(falcon.HTTP_400, self.srmock.status)
@ddt.data(-1, 'wee', [])
def test_put_raises_if_invalid_options(self, options):
path = self.url_prefix + '/pools/' + uuidutils.generate_uuid()
doc = {'weight': 1, 'uri': 'a', 'options': options}
self.simulate_put(path, body=jsonutils.dumps(doc))
self.assertEqual(falcon.HTTP_400, self.srmock.status)
def test_put_same_database_uri(self):
# NOTE(cabrera): setUp creates default pool
expect = self.doc
path = self.url_prefix + '/pools/' + uuidutils.generate_uuid()
self.simulate_put(path, body=jsonutils.dumps(expect))
self.assertEqual(falcon.HTTP_409, self.srmock.status)
def test_put_existing_overwrites(self):
# NOTE(cabrera): setUp creates default pool
expect = self.doc
self.simulate_put(self.pool,
body=jsonutils.dumps(expect))
self.assertEqual(falcon.HTTP_201, self.srmock.status)
result = self.simulate_get(self.pool)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
doc = jsonutils.loads(result[0])
self.assertEqual(expect['weight'], doc['weight'])
self.assertEqual(expect['uri'], doc['uri'])
def test_put_capabilities_mismatch_pool(self):
mongodb_doc = self.doc
self.simulate_put(self.pool,
body=jsonutils.dumps(mongodb_doc))
self.assertEqual(falcon.HTTP_201, self.srmock.status)
redis_doc = {'weight': 100,
'group': 'mygroup',
'uri': 'redis://127.0.0.1:6379'}
self.simulate_put(self.pool,
body=jsonutils.dumps(redis_doc))
self.assertEqual(falcon.HTTP_400, self.srmock.status)
def test_delete_works(self):
self.simulate_delete(self.pool)
self.assertEqual(falcon.HTTP_204, self.srmock.status)
self.simulate_get(self.pool)
self.assertEqual(falcon.HTTP_404, self.srmock.status)
def test_get_nonexisting_raises_404(self):
self.simulate_get(self.url_prefix + '/pools/nonexisting')
self.assertEqual(falcon.HTTP_404, self.srmock.status)
def _pool_expect(self, pool, xhref, xweight, xuri):
self.assertIn('href', pool)
self.assertIn('name', pool)
self.assertEqual(xhref, pool['href'])
self.assertIn('weight', pool)
self.assertEqual(xweight, pool['weight'])
self.assertIn('uri', pool)
# NOTE(dynarro): we are using startswith because we append UUIDs to
# the pool URIs, to avoid duplications
self.assertTrue(pool['uri'].startswith(xuri))
def test_get_works(self):
result = self.simulate_get(self.pool)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
pool = jsonutils.loads(result[0])
self._pool_expect(pool, self.pool, self.doc['weight'],
self.doc['uri'])
def test_detailed_get_works(self):
result = self.simulate_get(self.pool,
query_string='detailed=True')
self.assertEqual(falcon.HTTP_200, self.srmock.status)
pool = jsonutils.loads(result[0])
self._pool_expect(pool, self.pool, self.doc['weight'],
self.doc['uri'])
self.assertIn('options', pool)
self.assertEqual({}, pool['options'])
def test_patch_raises_if_missing_fields(self):
self.simulate_patch(self.pool,
body=jsonutils.dumps({'location': 1}))
self.assertEqual(falcon.HTTP_400, self.srmock.status)
def _patch_test(self, doc):
result = self.simulate_patch(self.pool,
body=jsonutils.dumps(doc))
self.assertEqual(falcon.HTTP_200, self.srmock.status)
updated_pool = jsonutils.loads(result[0])
self._pool_expect(updated_pool, self.pool, doc['weight'],
doc['uri'])
result = self.simulate_get(self.pool,
query_string='detailed=True')
self.assertEqual(falcon.HTTP_200, self.srmock.status)
pool = jsonutils.loads(result[0])
self._pool_expect(pool, self.pool, doc['weight'],
doc['uri'])
self.assertEqual(doc['options'], pool['options'])
def test_patch_works(self):
doc = {'weight': 101,
'uri': self.mongodb_url,
'options': {'a': 1}}
self._patch_test(doc)
def test_patch_works_with_extra_fields(self):
doc = {'weight': 101,
'uri': self.mongodb_url,
'options': {'a': 1},
'location': 100,
'partition': 'taco'}
self._patch_test(doc)
@ddt.data(-1, 2**32+1, 'big')
def test_patch_raises_400_on_invalid_weight(self, weight):
self.simulate_patch(self.pool,
body=jsonutils.dumps({'weight': weight}))
self.assertEqual(falcon.HTTP_400, self.srmock.status)
@ddt.data(-1, 2**32+1, [], 'localhost:27017')
def test_patch_raises_400_on_invalid_uri(self, uri):
self.simulate_patch(self.pool,
body=jsonutils.dumps({'uri': uri}))
self.assertEqual(falcon.HTTP_400, self.srmock.status)
@ddt.data(-1, 'wee', [])
def test_patch_raises_400_on_invalid_options(self, options):
self.simulate_patch(self.pool,
body=jsonutils.dumps({'options': options}))
self.assertEqual(falcon.HTTP_400, self.srmock.status)
def test_patch_raises_404_if_pool_not_found(self):
self.simulate_patch(self.url_prefix + '/pools/notexists',
body=jsonutils.dumps({'weight': 1}))
self.assertEqual(falcon.HTTP_404, self.srmock.status)
def test_empty_listing(self):
self.simulate_delete(self.pool)
result = self.simulate_get(self.url_prefix + '/pools')
results = jsonutils.loads(result[0])
self.assertEqual(falcon.HTTP_200, self.srmock.status)
self.assertEqual(0, len(results['pools']))
self.assertIn('links', results)
def _listing_test(self, count=10, limit=10,
marker=None, detailed=False):
# NOTE(cpp-cabrera): delete initial pool - it will interfere
# with listing tests
self.simulate_delete(self.pool)
query = 'limit={0}&detailed={1}'.format(limit, detailed)
if marker:
query += '&marker={0}'.format(marker)
with pools(self, count, self.doc['uri'], 'my-group') as expected:
result = self.simulate_get(self.url_prefix + '/pools',
query_string=query)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
results = jsonutils.loads(result[0])
self.assertIsInstance(results, dict)
self.assertIn('pools', results)
self.assertIn('links', results)
pool_list = results['pools']
link = results['links'][0]
self.assertEqual('next', link['rel'])
href = falcon.uri.parse_query_string(link['href'].split('?')[1])
self.assertIn('marker', href)
self.assertEqual(str(limit), href['limit'])
self.assertEqual(str(detailed).lower(), href['detailed'])
next_query_string = ('marker={marker}&limit={limit}'
'&detailed={detailed}').format(**href)
next_result = self.simulate_get(link['href'].split('?')[0],
query_string=next_query_string)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
next_pool = jsonutils.loads(next_result[0])
next_pool_list = next_pool['pools']
self.assertIn('links', next_pool)
if limit < count:
self.assertEqual(min(limit, count-limit),
len(next_pool_list))
else:
# NOTE(jeffrey4l): when limit >= count, there will be no
# pools in the 2nd page.
self.assertEqual(0, len(next_pool_list))
self.assertEqual(min(limit, count), len(pool_list))
for s in pool_list + next_pool_list:
# NOTE(flwang): It can't be assumed that sqlalchemy and mongodb
# return query results in the same order, i.e. the order in which
# the pools were inserted; sqlalchemy in particular can't
# guarantee that. So we leverage the relationship between the
# pool weight and the index of the pools fixture to get the
# right pool to verify.
expect = expected[s['weight']]
path, weight, group = expect[:3]
self._pool_expect(s, path, weight, self.doc['uri'])
if detailed:
self.assertIn('options', s)
self.assertEqual(s['options'], expect[-1])
else:
self.assertNotIn('options', s)
def test_listing_works(self):
self._listing_test()
def test_detailed_listing_works(self):
self._listing_test(detailed=True)
@ddt.data(1, 5, 10, 15)
def test_listing_works_with_limit(self, limit):
self._listing_test(count=15, limit=limit)
def test_listing_marker_is_respected(self):
self.simulate_delete(self.pool)
with pools(self, 10, self.doc['uri'], 'my-group') as expected:
result = self.simulate_get(self.url_prefix + '/pools',
query_string='marker=3')
self.assertEqual(falcon.HTTP_200, self.srmock.status)
pool_list = jsonutils.loads(result[0])['pools']
self.assertEqual(6, len(pool_list))
path, weight = expected[4][:2]
self._pool_expect(pool_list[0], path, weight, self.doc['uri'])
def test_listing_error_with_invalid_limit(self):
self.simulate_delete(self.pool)
query = 'limit={0}&detailed={1}'.format(0, True)
with pools(self, 10, self.doc['uri'], 'my-group'):
self.simulate_get(self.url_prefix + '/pools', query_string=query)
self.assertEqual(falcon.HTTP_400, self.srmock.status)
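The deleted fixtures above always carried a `group` key. A sketch of how an equivalent group-less fixture could look (assumed; the replacement test is not shown in this diff):
@contextlib.contextmanager
def pool(test, name, weight, uri, flavor=None, options=None):
    """Construct a group-less pool for testing (illustrative sketch)."""
    # Append a UUID so that two pools never share the same URI.
    uri = "%s/%s" % (uri, uuidutils.generate_uuid())
    doc = {'weight': weight, 'uri': uri,
           'flavor': flavor, 'options': options or {}}
    path = test.url_prefix + '/pools/' + name
    test.simulate_put(path, body=jsonutils.dumps(doc))
    try:
        yield name, weight, uri, flavor, doc['options']
    finally:
        test.simulate_delete(path)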

View File

@ -74,8 +74,6 @@ class Listing(object):
for entry in flavors:
entry['href'] = request.path + '/' + entry['name']
# NOTE(wanghao): remove this in Newton.
entry['pool'] = entry['pool_group']
results['links'] = [
{
@ -101,9 +99,6 @@ class Resource(object):
validator_type = jsonschema.Draft4Validator
self._validators = {
'create': validator_type(schema.create),
'pool_group': validator_type(schema.patch_pool_group),
# NOTE(wanghao): Remove this in Newton.
'pool': validator_type(schema.patch_pool),
'capabilities': validator_type(schema.patch_capabilities),
}
@ -125,8 +120,6 @@ class Resource(object):
data = self._ctrl.get(flavor,
project=project_id,
detailed=detailed)
# NOTE(wanghao): remove this in Newton.
data['pool'] = data['pool_group']
except errors.FlavorDoesNotExist as ex:
LOG.debug(ex)
raise wsgi_errors.HTTPNotFound(six.text_type(ex))
@ -140,7 +133,7 @@ class Resource(object):
::
{"pool_group": "my-pool-group", "capabilities": {}}
{"capabilities": {}}
A capabilities object may also be provided.
@ -151,19 +144,16 @@ class Resource(object):
data = wsgi_utils.load(request)
wsgi_utils.validate(self._validators['create'], data)
pool_group = data.get('pool_group') or data.get('pool')
try:
self._ctrl.create(flavor,
pool_group=pool_group,
project=project_id,
capabilities=data['capabilities'])
response.status = falcon.HTTP_201
response.location = request.path
except errors.PoolGroupDoesNotExist as ex:
LOG.exception(ex)
description = (_(u'Flavor %(flavor)s could not be created. '
u'Pool group %(pool_group)s does not exist') %
dict(flavor=flavor, pool_group=pool_group))
description = (_(u'Flavor %(flavor)s could not be created. ') %
dict(flavor=flavor))
raise falcon.HTTPBadRequest(_('Unable to create'), description)
def on_delete(self, request, response, project_id, flavor):
@ -192,11 +182,11 @@ class Resource(object):
LOG.debug(u'PATCH flavor - name: %s', flavor)
data = wsgi_utils.load(request)
EXPECT = ('pool_group', 'capabilities', 'pool')
EXPECT = ('capabilities',)
if not any([(field in data) for field in EXPECT]):
LOG.debug(u'PATCH flavor, bad params')
raise wsgi_errors.HTTPBadRequestBody(
'One of `pool_group` or `capabilities` or `pool` needs '
'`capabilities` needs '
'to be specified'
)
@ -205,10 +195,6 @@ class Resource(object):
fields = common_utils.fields(data, EXPECT,
pred=lambda v: v is not None)
# NOTE(wanghao): remove this in Newton.
if fields.get('pool') and fields.get('pool_group') is None:
fields['pool_group'] = fields.get('pool')
fields.pop('pool')
try:
self._ctrl.update(flavor, project=project_id, **fields)
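After this hunk the only field this flavors resource accepts on PATCH is `capabilities`; requests that carry only `pool` or `pool_group` are rejected as bad params. An illustrative body:
# Illustrative flavor PATCH body after the change.
patch_doc = {'capabilities': {}}
# e.g. self.simulate_patch(self.url_prefix + '/flavors/test-flavor',
#                          body=jsonutils.dumps(patch_doc))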

View File

@ -178,7 +178,6 @@ class Resource(object):
try:
self._ctrl.create(pool, weight=data['weight'],
uri=data['uri'],
group=data.get('group'),
options=data.get('options', {}))
response.status = falcon.HTTP_201
response.location = request.path
@ -226,11 +225,11 @@ class Resource(object):
LOG.debug(u'PATCH pool - name: %s', pool)
data = wsgi_utils.load(request)
EXPECT = ('weight', 'uri', 'group', 'options')
EXPECT = ('weight', 'uri', 'options')
if not any([(field in data) for field in EXPECT]):
LOG.debug(u'PATCH pool, bad params')
raise wsgi_errors.HTTPBadRequestBody(
'One of `uri`, `weight`, `group`, or `options` needs '
'One of `uri`, `weight`, or `options` needs '
'to be specified'
)
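Correspondingly, a pool PATCH against this resource now accepts only `weight`, `uri`, and `options`; an illustrative body:
# Illustrative pool PATCH body after the change; 'group' is no longer
# accepted.
patch_doc = {'weight': 101,
             'uri': 'mongodb://127.0.0.1:27017',
             'options': {'a': 1}}
# e.g. self.simulate_patch(self.url_prefix + '/pools/mypool',
#                          body=jsonutils.dumps(patch_doc))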

View File

@ -51,7 +51,7 @@ class Listing(object):
{
"flavors": [
{"href": "", "capabilities": {}, "pool_group": "",
{"href": "", "capabilities": {},
"pool_list": ""},
...
],
@ -65,7 +65,6 @@ class Listing(object):
"""
LOG.debug(u'LIST flavors for project_id %s', project_id)
store = {}
request.get_param('marker', store=store)
request.get_param_as_int('limit', store=store)
@ -87,8 +86,14 @@ class Listing(object):
for entry in flavors:
entry['href'] = request.path + '/' + entry['name']
# NOTE(gengchc): Remove pool_group in Rocky
entry['pool'] = entry['pool_group']
data = {}
data['name'] = entry['name']
pool_list = \
list(self._pools_ctrl.get_pools_by_flavor(flavor=data))
pool_name_list = []
if len(pool_list) > 0:
pool_name_list = [x['name'] for x in pool_list]
entry['pool_list'] = pool_name_list
if detailed:
caps = self._pools_ctrl.capabilities(flavor=entry)
entry['capabilities'] = [str(cap).split('.')[-1]
@ -120,13 +125,9 @@ class Resource(object):
def __init__(self, flavors_controller, pools_controller):
self._ctrl = flavors_controller
self._pools_ctrl = pools_controller
validator_type = jsonschema.Draft4Validator
self._validators = {
'create': validator_type(schema.create),
# NOTE(gengchc): Remove pool_group in Rocky.
'pool_group': validator_type(schema.patch_pool_group),
'pool': validator_type(schema.patch_pool),
'pool_list': validator_type(schema.patch_pool_list),
'capabilities': validator_type(schema.patch_capabilities),
}
@ -151,8 +152,6 @@ class Resource(object):
capabilities = self._pools_ctrl.capabilities(flavor=data)
data['capabilities'] = [str(cap).split('.')[-1]
for cap in capabilities]
# NOTE(gengchc): Remove pool_group in Rocky.
data['pool'] = data['pool_group']
pool_list =\
list(self._pools_ctrl.get_pools_by_flavor(flavor=data))
pool_name_list = []
@ -231,26 +230,6 @@ class Resource(object):
dict(flavor=flavor, msg=str(ex)))
raise falcon.HTTPBadRequest(_('Unable to create'), description)
def _on_put_by_group(self, request, response, project_id,
flavor, pool_group):
LOG.debug(u'PUT flavor - name: %s by group', flavor)
flavor_obj = {}
flavor_obj["pool_group"] = pool_group
capabilities = self._pools_ctrl.capabilities(flavor_obj)
try:
self._ctrl.create(flavor,
pool_group=pool_group,
project=project_id,
capabilities=capabilities)
response.status = falcon.HTTP_201
response.location = request.path
except errors.PoolGroupDoesNotExist as ex:
LOG.exception(ex)
description = (_(u'Flavor %(flavor)s could not be created. '
u'Pool group %(pool_group)s does not exist') %
dict(flavor=flavor, pool_group=pool_group))
raise falcon.HTTPBadRequest(_('Unable to create'), description)
@decorators.TransportLog("Flavors item")
@acl.enforce("flavors:create")
def on_put(self, request, response, project_id, flavor):
@ -258,8 +237,7 @@ class Resource(object):
::
{"pool_group": "my-pool-group",
"pool_list": [], "capabilities": {}}
{"pool_list": [], "capabilities": {}}
A capabilities object may also be provided.
@ -270,15 +248,10 @@ class Resource(object):
data = wsgi_utils.load(request)
wsgi_utils.validate(self._validators['create'], data)
LOG.debug(u'The pool_group will be removed in Rocky release.')
pool_group = data.get('pool_group') or data.get('pool')
pool_list = data.get('pool_list')
if pool_list is not None:
self._on_put_by_pool_list(request, response, project_id,
flavor, pool_list)
else:
self._on_put_by_group(request, response, project_id,
flavor, pool_group)
@decorators.TransportLog("Flavors item")
@acl.enforce("flavors:delete")
@ -367,54 +340,29 @@ class Resource(object):
resp_data['href'] = request.path
response.body = transport_utils.to_json(resp_data)
def _on_patch_by_group(self, request, response, project_id,
flavor, pool_group):
LOG.debug(u'PATCH flavor - name: %s by group', flavor)
resp_data = None
try:
flvor_obj = {}
flvor_obj['pool_group'] = pool_group
capabilities = self._pools_ctrl.capabilities(flavor=flvor_obj)
self._ctrl.update(flavor, project=project_id,
pool_group=pool_group,
capabilities=capabilities)
resp_data = self._ctrl.get(flavor, project=project_id)
resp_data['capabilities'] = [str(cap).split('.')[-1]
for cap in capabilities]
except errors.FlavorDoesNotExist as ex:
LOG.exception(ex)
raise wsgi_errors.HTTPNotFound(six.text_type(ex))
resp_data['href'] = request.path
response.body = transport_utils.to_json(resp_data)
@decorators.TransportLog("Flavors item")
@acl.enforce("flavors:update")
def on_patch(self, request, response, project_id, flavor):
"""Allows one to update a flavors'pool list.
This method expects the user to submit a JSON object
containing 'pool_group' or 'pool list'. If none is found,
containing 'pool_list'. If it is not found,
the request is flagged as bad. There is also strict format
checking through the use of jsonschema. Appropriate errors
are returned in each case for badly formatted input.
:returns: HTTP | [200, 400]
"""
LOG.debug(u'PATCH flavor - name: %s', flavor)
data = wsgi_utils.load(request)
# NOTE(gengchc2): remove pool_group in R release.
EXPECT = ('pool_group', 'pool', 'pool_list')
if not any([(field in data) for field in EXPECT]):
field = 'pool_list'
if field not in data:
LOG.debug(u'PATCH flavor, bad params')
raise wsgi_errors.HTTPBadRequestBody(
'`pool_group` or `pool` or `pool_list` needs to be specified'
'`pool_list` needs to be specified'
)
for field in EXPECT:
wsgi_utils.validate(self._validators[field], data)
LOG.debug(u'The pool_group will be removed in Rocky release.')
pool_group = data.get('pool_group') or data.get('pool')
pool_list = data.get('pool_list')
# NOTE(gengchc2): If pool_list is not None, the flavor is configured
# using the new schema.
@ -422,6 +370,3 @@ class Resource(object):
if pool_list is not None:
self._on_patch_by_pool_list(request, response, project_id,
flavor, pool_list)
else:
self._on_patch_by_group(request, response, project_id,
flavor, pool_group)
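With `_on_put_by_group` and `_on_patch_by_group` removed, every flavor PUT and PATCH in this resource goes through the pool_list handlers; illustrative request bodies:
# Illustrative flavor bodies after the change (values are hypothetical).
create_doc = {'pool_list': ['pool-a', 'pool-b'], 'capabilities': {}}
patch_doc = {'pool_list': ['pool-a']}
# e.g. self.simulate_put(self.url_prefix + '/flavors/my-flavor',
#                        body=jsonutils.dumps(create_doc))
# e.g. self.simulate_patch(self.url_prefix + '/flavors/my-flavor',
#                          body=jsonutils.dumps(patch_doc))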

View File

@ -136,7 +136,6 @@ class Resource(object):
self._validators = {
'weight': validator_type(schema.patch_weight),
'uri': validator_type(schema.patch_uri),
'group': validator_type(schema.patch_group),
'flavor': validator_type(schema.patch_flavor),
'options': validator_type(schema.patch_options),
'create': validator_type(schema.create)
@ -195,7 +194,6 @@ class Resource(object):
try:
self._ctrl.create(pool, weight=data['weight'],
uri=data['uri'],
group=data.get('group', None),
flavor=data.get('flavor', None),
options=data.get('options', {}))
response.status = falcon.HTTP_201
@ -236,7 +234,7 @@ class Resource(object):
"""Allows one to update a pool's weight, uri, and/or options.
This method expects the user to submit a JSON object
containing at least one of: 'uri', 'weight', 'group', 'flavor',
containing at least one of: 'uri', 'weight', 'flavor',
'options'. If none are found, the request is flagged as bad.
There is also strict format checking through the use of
jsonschema. Appropriate errors are returned in each case for
@ -248,11 +246,11 @@ class Resource(object):
LOG.debug(u'PATCH pool - name: %s', pool)
data = wsgi_utils.load(request)
EXPECT = ('weight', 'uri', 'group', 'flavor', 'options')
EXPECT = ('weight', 'uri', 'flavor', 'options')
if not any([(field in data) for field in EXPECT]):
LOG.debug(u'PATCH pool, bad params')
raise wsgi_errors.HTTPBadRequestBody(
'One of `uri`, `weight`, `group`, `flavor`,'
'One of `uri`, `weight`, `flavor`,'
' or `options` needs '
'to be specified'
)
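Finally, a pool in this resource is associated with a flavor directly rather than through a group; an illustrative create body:
# Illustrative pool create body after the change: 'flavor' replaces the
# removed 'group' linkage (values are hypothetical).
create_doc = {'weight': 100,
              'uri': 'mongodb://127.0.0.1:27017',
              'flavor': 'my-flavor',
              'options': {}}
# e.g. self.simulate_put(self.url_prefix + '/pools/mypool',
#                        body=jsonutils.dumps(create_doc))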