Merge "Revert "change the backend index when change the volume driver" because this can cause unit test errors."

This commit is contained in:
Jenkins 2016-11-26 06:05:25 +00:00 committed by Gerrit Code Review
commit b499ceed0f
3 changed files with 6 additions and 545 deletions

View File

@ -470,8 +470,7 @@ class Controller(controller.BaseController):
return roles
# backend_index should be unique in cluster
def _get_cinder_volume_backend_index(self, req, disk_array, cluster_id,
cinder_volume_id=None):
def _get_cinder_volume_backend_index(self, req, disk_array, cluster_id):
cluster_roles = self._get_cluster_roles(req, cluster_id)
cinder_volumes = []
for role in cluster_roles:
@ -486,8 +485,6 @@ class Controller(controller.BaseController):
flag = True
for cinder_volume in cinder_volumes:
if backend_index == cinder_volume['backend_index']:
if cinder_volume['id'] == cinder_volume_id:
continue
index = index + 1
flag = False
break
@ -629,13 +626,8 @@ class Controller(controller.BaseController):
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
orgin_cinder_volume = self.get_cinder_volume_meta_or_404(req, id)
if 'role_id' in disk_meta:
role_detail = self.get_role_meta_or_404(
req, disk_meta['role_id'])
else:
role_detail = self.get_role_meta_or_404(
req, orgin_cinder_volume['role_id'])
self._raise_404_if_role_deleted(req, disk_meta['role_id'])
if ('volume_driver' in disk_meta and disk_meta[
'volume_driver'] not in CINDER_VOLUME_BACKEND_DRIVER):
msg = "volume_driver %s is not supported" % disk_meta[
@ -643,17 +635,11 @@ class Controller(controller.BaseController):
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
if disk_meta.get('volume_driver', None):
volume_driver = disk_meta['volume_driver']
disk_meta['backend_index'] = \
self._get_cinder_volume_backend_index(
req, disk_meta, role_detail['cluster_id'], id)
else:
volume_driver = orgin_cinder_volume['volume_driver']
orgin_cinder_volume = self.get_cinder_volume_meta_or_404(req, id)
volume_driver = disk_meta.get('volume_driver',
orgin_cinder_volume['volume_driver'])
if volume_driver == 'FUJITSU_ETERNUS':
if not disk_meta.get('root_pwd', None):
disk_meta['root_pwd'] = orgin_cinder_volume['root_pwd']
if not disk_meta['root_pwd']:
if not disk_meta.get('root_pwd', orgin_cinder_volume['root_pwd']):
msg = "root_pwd must be given " + \
"when using FUJITSU Disk Array"
LOG.error(msg)

View File

@ -1,300 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base classes for our unit tests.
Allows overriding of CONF for use of fakes, and some black magic for
inline callbacks.
"""
import logging
import os
import shutil
# import uuid
import fixtures
import mock
import mox
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log
from oslo_messaging import conffixture as messaging_conffixture
from oslo_utils import strutils
from oslo_utils import timeutils
import stubout
import testtools
from daisy.db import migration
from daisy.db.sqlalchemy import api as sqla_api
from daisy import i18n
from daisy.tests import conf_fixture
# from daisy.tests import fake_notifier
# Extra option this test package registers on top of the normal config:
# the filename of the pristine sqlite snapshot restored before each test.
test_opts = [
    cfg.StrOpt('sqlite_clean_db',
               default='clean.sqlite',
               help='File name of clean sqlite db'), ]

CONF = cfg.CONF
CONF.register_opts(test_opts)

LOG = log.getLogger(__name__)

# Process-wide cache of the Database fixture so schema migration runs
# only once per test run (populated lazily in TestCase.setUp).
_DB_CACHE = None
class TestingException(Exception):
    """Generic exception raised deliberately from within unit tests."""

    pass
class Database(fixtures.Fixture):
    """Fixture that provides a freshly-migrated database for each test.

    On construction the schema migrations are run once.  For an in-memory
    SQLite URL ("sqlite://") the migrated content is dumped to SQL text and
    replayed into a new connection in setUp(); for a file-backed database a
    pristine copy of the migrated db file is kept and copied back over the
    test db in setUp().
    """

    def __init__(self, db_api, db_migrate, sql_connection,
                 sqlite_db, sqlite_clean_db):
        self.sql_connection = sql_connection
        self.sqlite_db = sqlite_db
        self.sqlite_clean_db = sqlite_clean_db

        # Start from a disposed engine, then run all migrations once.
        self.engine = db_api.get_engine()
        self.engine.dispose()
        conn = self.engine.connect()
        db_migrate.db_sync()
        if sql_connection == "sqlite://":
            # In-memory db: capture the migrated schema/data as SQL text
            # so it can be replayed cheaply before every test.
            conn = self.engine.connect()
            self._DB = "".join(line for line in conn.connection.iterdump())
            self.engine.dispose()
        else:
            # File-backed db: keep a pristine copy to restore from.
            # NOTE(review): paths presumably live under CONF.state_path —
            # confirm that option is registered by the config fixture.
            cleandb = os.path.join(CONF.state_path, sqlite_clean_db)
            testdb = os.path.join(CONF.state_path, sqlite_db)
            shutil.copyfile(testdb, cleandb)

    def setUp(self):
        """Restore the pristine database state before each test."""
        super(Database, self).setUp()
        if self.sql_connection == "sqlite://":
            conn = self.engine.connect()
            conn.connection.executescript(self._DB)
            self.addCleanup(self.engine.dispose)
        else:
            shutil.copyfile(
                os.path.join(CONF.state_path, self.sqlite_clean_db),
                os.path.join(CONF.state_path, self.sqlite_db))
def _patch_mock_to_raise_for_invalid_assert_calls():
    """Patch mock.Mock.__getattr__ so typo'd assert methods fail loudly.

    A plain mock.Mock() silently accepts any attribute access, so a
    misspelled assertion (e.g. ``m.assert_called_wiht``) just creates a
    child mock and the test passes vacuously.  After this patch, any
    attribute starting with ``assert`` that is not a known mock assertion
    raises AttributeError instead.
    """
    def raise_for_invalid_assert_calls(wrapped):
        def wrapper(_self, name):
            # Real assertion helpers provided by the mock library.
            # BUG FIX: the real API method is ``assert_any_call`` — the
            # original list only had the non-existent ``assert_any_calls``,
            # so legitimate assert_any_call() uses raised AttributeError.
            # The old (broken) name is kept so no existing caller breaks.
            valid_asserts = [
                'assert_called_with',
                'assert_called_once_with',
                'assert_has_calls',
                'assert_any_call',
                'assert_any_calls']
            if name.startswith('assert') and name not in valid_asserts:
                raise AttributeError('%s is not a valid mock assert method'
                                     % name)
            return wrapped(_self, name)
        return wrapper
    mock.Mock.__getattr__ = raise_for_invalid_assert_calls(
        mock.Mock.__getattr__)
# NOTE(gibi): needs to be called only once at import time
# to patch the mock lib
_patch_mock_to_raise_for_invalid_assert_calls()
class TestCase(testtools.TestCase):
    """Test case base class for all unit tests.

    Wires up config defaults, optional stdout/stderr/log capture, a fake
    messaging transport, the shared Database fixture, mox/stubout helpers
    and per-test cleanups.
    """

    def setUp(self):
        """Run before each test method to initialize test environment."""
        super(TestCase, self).setUp()

        # Import cinder objects for test cases
        # objects.register_all()

        # Unit tests do not need to use lazy gettext
        i18n.enable_lazy(False)

        # Honour OS_TEST_TIMEOUT only when it parses as a positive integer.
        test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
        try:
            test_timeout = int(test_timeout)
        except ValueError:
            # If timeout value is invalid do not set a timeout.
            test_timeout = 0
        if test_timeout > 0:
            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
        self.useFixture(fixtures.NestedTempfile())
        self.useFixture(fixtures.TempHomeDir())

        # Capture streams/logs only when the corresponding OS_* env var
        # is truthy (mirrors the standard OpenStack test harness).
        environ_enabled = (lambda var_name:
                           strutils.bool_from_string(os.environ.get(var_name)))
        if environ_enabled('OS_STDOUT_CAPTURE'):
            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
        if environ_enabled('OS_STDERR_CAPTURE'):
            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
        if environ_enabled('OS_LOG_CAPTURE'):
            log_format = '%(levelname)s [%(name)s] %(message)s'
            if environ_enabled('OS_DEBUG'):
                level = logging.DEBUG
            else:
                level = logging.INFO
            self.useFixture(fixtures.LoggerFixture(nuke_handlers=False,
                                                   format=log_format,
                                                   level=level))

        # Route oslo.messaging through the in-process 'fake' transport.
        self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
        self.messaging_conf.transport_driver = 'fake'
        self.messaging_conf.response_timeout = 15
        self.useFixture(self.messaging_conf)

        conf_fixture.set_defaults(CONF)
        CONF([], default_config_files=[])

        # NOTE(vish): We need a better method for creating fixtures for tests
        #             now that we have some required db setup for the system
        #             to work properly.
        self.start = timeutils.utcnow()

        CONF.set_default('connection', 'sqlite://', 'database')
        CONF.set_default('sqlite_synchronous', False, 'database')

        # The Database fixture is cached process-wide so migrations run once.
        global _DB_CACHE
        if not _DB_CACHE:
            _DB_CACHE = Database(sqla_api, migration,
                                 sql_connection=CONF.database.connection,
                                 sqlite_db=CONF.database.sqlite_db,
                                 sqlite_clean_db=CONF.sqlite_clean_db)
        self.useFixture(_DB_CACHE)

        # emulate some of the mox stuff, we can't use the metaclass
        # because it screws with our generators
        self.mox = mox.Mox()
        self.stubs = stubout.StubOutForTesting()
        self.addCleanup(CONF.reset)
        self.addCleanup(self.mox.UnsetStubs)
        self.addCleanup(self.stubs.UnsetAll)
        self.addCleanup(self.stubs.SmartUnsetAll)
        self.addCleanup(self.mox.VerifyAll)
        self.addCleanup(self._common_cleanup)
        self.injected = []

        # Per-test lock directory for oslo.concurrency external locks.
        lock_path = self.useFixture(fixtures.TempDir()).path
        lockutils.set_defaults(lock_path)

    def _common_cleanup(self):
        """Runs after each test method to tear down test environment."""
        # Stop any timers
        for x in self.injected:
            try:
                x.stop()
            except AssertionError:
                pass

        # Drop every public attribute so state cannot leak between tests.
        for key in [k for k in self.__dict__.keys() if k[0] != '_']:
            del self.__dict__[key]

    def override_config(self, name, override, group=None):
        """Cleanly override CONF variables.

        :param name: option name
        :param override: value to set for the duration of the test
        :param group: optional option group
        """
        # BUG FIX: the original only registered the cleanup and never
        # applied the override itself, so override_config()/flags() were
        # silent no-ops.  Apply first, then schedule the reset.
        CONF.set_override(name, override, group)
        self.addCleanup(CONF.clear_override, name, group)

    def flags(self, **kw):
        """Override CONF variables for a test."""
        # .items() works on both Python 2 and 3; .iteritems() is py2-only.
        for k, v in kw.items():
            self.override_config(k, v)

    def log_level(self, level):
        """Set logging level to the specified value."""
        log_root = logging.getLogger(None).logger
        log_root.setLevel(level)

    def mock_object(self, obj, attr_name, new_attr=None, **kwargs):
        """Use python mock to mock an object attribute

        Mocks the specified objects attribute with the given value.
        Automatically performs 'addCleanup' for the mock.
        """
        # Compare against None, not truthiness: callers must be able to
        # install falsy replacements such as 0, False or "".
        if new_attr is None:
            new_attr = mock.Mock()
        patcher = mock.patch.object(obj, attr_name, new_attr, **kwargs)
        patcher.start()
        self.addCleanup(patcher.stop)
        return new_attr

    # Useful assertions
    def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001):
        """Assert two dicts are equivalent.

        This is a 'deep' match in the sense that it handles nested
        dictionaries appropriately.

        NOTE:

            If you don't care (or don't know) a given value, you can specify
            the string DONTCARE as the value. This will cause that dict-item
            to be skipped.
        """
        def raise_assertion(msg):
            d1str = d1
            d2str = d2
            base_msg = ('Dictionaries do not match. %(msg)s d1: %(d1str)s '
                        'd2: %(d2str)s' %
                        {'msg': msg, 'd1str': d1str, 'd2str': d2str})
            raise AssertionError(base_msg)

        d1keys = set(d1.keys())
        d2keys = set(d2.keys())
        if d1keys != d2keys:
            d1only = d1keys - d2keys
            d2only = d2keys - d1keys
            raise_assertion('Keys in d1 and not d2: %(d1only)s. '
                            'Keys in d2 and not d1: %(d2only)s' %
                            {'d1only': d1only, 'd2only': d2only})

        for key in d1keys:
            d1value = d1[key]
            d2value = d2[key]
            try:
                # Numeric-looking values may be compared within a tolerance.
                error = abs(float(d1value) - float(d2value))
                within_tolerance = error <= tolerance
            except (ValueError, TypeError):
                # If both values aren't convertible to float, just ignore
                # ValueError if arg is a str, TypeError if it's something else
                # (like None)
                within_tolerance = False

            if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'):
                self.assertDictMatch(d1value, d2value)
            elif 'DONTCARE' in (d1value, d2value):
                continue
            elif approx_equal and within_tolerance:
                continue
            elif d1value != d2value:
                raise_assertion("d1['%(key)s']=%(d1value)s != "
                                "d2['%(key)s']=%(d2value)s" %
                                {
                                    'key': key,
                                    'd1value': d1value,
                                    'd2value': d2value,
                                })

View File

@ -1,225 +0,0 @@
import mock
import webob
from oslo.serialization import jsonutils
from daisy.api.v1 import disk_array
from daisy.context import RequestContext
import daisy.registry.client.v1.api as registry
from daisy import test
def fake_do_request_for_get_roles(method, path, **params):
    """Stub registry transport: a GET yields a canned two-role payload.

    Any other method returns a bare Mock whose read() is unconfigured.
    """
    response = mock.Mock()
    if method != "GET":
        return response
    payload = {'roles': [{'id': 'role_id_1'},
                         {'id': 'role_id_2'}]}
    response.read.return_value = jsonutils.dumps(payload)
    return response
def set_cinder_volume_list():
    """Return two canned FUJITSU_ETERNUS cinder-volume records.

    The records share connection/credential fields and differ only in
    pools, backend_index, resource_pools and id.
    """
    shared = {
        'management_ips': '10.43.178.9',
        'data_ips': '10.43.178.19',
        'role_id': 'badb5177-4659-4b40-8e46-856ef5a121e0',
        'volume_type': 'ext4',
        'user_pwd': 'pwd',
        'volume_driver': 'FUJITSU_ETERNUS',
        'root_pwd': 'root',
        'user_name': 'user',
    }
    first = dict(shared,
                 pools='pool2,pool3',
                 backend_index='FUJITSU_ETERNUS-1',
                 resource_pools=None,
                 id='77a3eec6-6cf0-4f84-82a4-e9339d824b3a')
    second = dict(shared,
                  pools='pool3,pool4',
                  backend_index='FUJITSU_ETERNUS-2',
                  resource_pools='resource_pools',
                  id='a1a726c6-161e-4a79-9b2b-a627d4722417')
    return [first, second]
def set_add_cinder_volume_info():
    """Return the request body used when adding a disk array to a role.

    Note the 'disk_array' value is a *string* holding a Python-literal
    list, exactly as the API receives it.
    """
    disk_array_literal = (
        "[{'management_ips': "
        "'10.43.178.9', 'data_ips': '10.43.178.19',"
        "'user_pwd': 'pwd', 'volume_type': 'ext4',"
        "'volume_driver': 'FUJITSU_ETERNUS', "
        "'root_pwd': 'root', 'pools': 'pool2,pool4',"
        "'resource_pools': 'resource_pools', "
        "'user_name': 'user'}]")
    return {
        'disk_array': disk_array_literal,
        'role_id': 'badb5177-4659-4b40-8e46-856ef5a121e0',
    }
def returned_cinder_vol_info():
    """Canned registry response for a single FUJITSU_ETERNUS volume."""
    info = {
        'id': '77a3eec6-6cf0-4f84-82a4-e9339d824b3a',
        'role_id': 'badb5177-4659-4b40-8e46-856ef5a121e0',
        'deleted': False,
        'management_ips': '10.43.178.9',
        'data_ips': '10.43.178.19',
        'volume_type': 'ext4',
        'volume_driver': 'FUJITSU_ETERNUS',
        'backend_index': 'FUJITSU_ETERNUS-1',
        'pools': 'pool2,pool4',
        'resource_pools': 'resource_pools',
        'user_name': 'user',
        'user_pwd': 'pwd',
        'root_pwd': 'root',
    }
    return info
class TestDiskArray(test.TestCase):
    """Unit tests for daisy.api.v1.disk_array.Controller."""

    def setUp(self):
        super(TestDiskArray, self).setUp()
        self.controller = disk_array.Controller()
        self.req = webob.Request.blank('/')
        self.req.context = RequestContext(is_admin=True,
                                          user='fake user',
                                          tenant='fake tenamet')

    def test__get_cinder_volume_backend_index(self):
        # Two existing volumes occupy KS3200_IPSAN-1/-2, so the next free
        # backend_index for that driver must be KS3200_IPSAN-3.
        cluster_id = "cluster_id_123"
        roles = [{'id': 'role_id_1'},
                 {'id': 'role_id_2'}]
        # NOTE(review): cinder_volume_id is unused by this test.
        cinder_volume_id = '3'
        self.controller._get_cluster_roles =\
            mock.Mock(return_value=roles)
        cinder_volumes = [{'backend_index': 'KS3200_IPSAN-1',
                           'id': '1'},
                          {'backend_index': 'KS3200_IPSAN-2',
                           'id': '2'}]
        self.controller._cinder_volume_list =\
            mock.Mock(return_value=cinder_volumes)
        disk_array_1 = {'volume_driver': 'KS3200_IPSAN'}
        backend_index = self.controller._get_cinder_volume_backend_index(
            self.req, disk_array_1, cluster_id)
        self.assertEqual(backend_index, 'KS3200_IPSAN-3')

    @mock.patch('daisy.registry.client.v1.api.get_role_metadata')
    @mock.patch('daisy.registry.client.v1.api.'
                'update_cinder_volume_metadata')
    @mock.patch('daisy.registry.client.v1.api.'
                'get_cinder_volume_detail_metadata')
    def test_cinder_volume_update(self,
                                  mock_get_cinder_volume,
                                  mock_update_cinder_volume_metadata,
                                  mock_get_role):
        # Plain field update: the new root_pwd must survive the round trip.
        cinder_volume_id = '1'
        mock_get_cinder_volume.return_value = \
            {'id': '1',
             'management_ips': ['10.4.5.7'],
             'volume_driver': 'FUJITSU_ETERNUS',
             'root_pwd': 'aaaa',
             'data_ips': ['19.4.5.7'],
             'role_id': '1'}
        mock_get_role.return_value = {'cluster_id': '1'}
        disk_meta = {
            'management_ips': ['10.5.6.7'],
            'data_ips': ['13.5.8.9'],
            'root_pwd': 'bbbb'
        }
        mock_update_cinder_volume_metadata.return_value = \
            {'id': '1',
             'management_ips': ['10.5.6.7'],
             'volume_driver': 'FUJITSU_ETERNUS',
             'root_pwd': 'bbbb',
             'data_ips': ['13.5.8.9']}
        cinder_volume = self.controller.cinder_volume_update(
            self.req, cinder_volume_id, disk_meta)
        self.assertEqual('bbbb',
                         cinder_volume['disk_meta']['root_pwd'])

    @mock.patch('daisy.registry.client.v1.api.get_role_metadata')
    def test_cinder_volume_update_with_resource_pools(self, mock_get_role):
        # resource_pools and root_pwd sent in disk_meta must be reflected
        # in the updated volume returned by the controller.
        cinder_volume_lists = set_cinder_volume_list()
        registry.list_cinder_volume_metadata = \
            mock.Mock(return_value=cinder_volume_lists)
        cinder_vol_info = returned_cinder_vol_info()
        self.controller.get_cinder_volume_meta_or_404 = \
            mock.Mock(return_value=cinder_vol_info)
        mock_get_role.return_value = {'cluster_id': '1'}
        disk_meta = {'resource_pools': 'pool3,pool4', 'root_pwd': 'root3'}
        cinder_vol_info['resource_pools'] = disk_meta['resource_pools']
        cinder_vol_info['root_pwd'] = disk_meta['root_pwd']
        registry.update_cinder_volume_metadata = \
            mock.Mock(return_value=cinder_vol_info)
        cinder_vol_id = '77a3eec6-6cf0-4f84-82a4-e9339d824b3a'
        return_info = self.controller.cinder_volume_update(self.req,
                                                           cinder_vol_id,
                                                           disk_meta)
        self.assertEqual('root3',
                         return_info['disk_meta']['root_pwd'])
        self.assertEqual('pool3,pool4',
                         return_info['disk_meta']['resource_pools'])

    @mock.patch('daisy.registry.client.v1.api.'
                'update_cinder_volume_metadata')
    @mock.patch('daisy.registry.client.v1.api.'
                'list_cinder_volume_metadata')
    @mock.patch('daisy.registry.client.v1.api.get_roles_detail')
    @mock.patch('daisy.registry.client.v1.api.get_role_metadata')
    @mock.patch('daisy.registry.client.v1.api.'
                'get_cinder_volume_detail_metadata')
    def test_update_cinder_volume_with_same_volume_driver(
            self, mock_get_cinder_volume, mock_get_role, mock_get_roles,
            mock_get_cinder_volumes, mock_update_cinder_volume):
        # Updating a volume without changing its driver: the volume keeps
        # its own backend_index slot (FUJITSU_ETERNUS-1).
        cinder_volume_id = '1'
        disk_meta = {
            'volume_driver': 'FUJITSU_ETERNUS',
            'root_pwd': 'aaaaaaa',
            'data_ips': ['192.168.1.2']
        }
        mock_get_cinder_volume.return_value = {
            'role_id': '1', 'volume_driver': 'FUJITSU_ETERNUS',
            'data_ips': ['192.1.3.4'], 'root_pwd': 'bbbbb'}
        mock_get_role.return_value = {'cluster_id': '1'}
        mock_get_roles.return_value = [{'id': '1'}]
        mock_get_cinder_volumes.return_value = [
            {'id': '1', 'backend_index': 'FUJITSU_ETERNUS-1'}]
        mock_update_cinder_volume.return_value = {}
        self.controller.cinder_volume_update(self.req, cinder_volume_id,
                                             disk_meta)
        self.assertEqual('FUJITSU_ETERNUS-1',
                         disk_meta.get('backend_index', None))

    @mock.patch('daisy.registry.client.v1.api.'
                'update_cinder_volume_metadata')
    @mock.patch('daisy.registry.client.v1.api.'
                'list_cinder_volume_metadata')
    @mock.patch('daisy.registry.client.v1.api.get_roles_detail')
    @mock.patch('daisy.registry.client.v1.api.get_role_metadata')
    @mock.patch('daisy.registry.client.v1.api.'
                'get_cinder_volume_detail_metadata')
    def test_update_cinder_volume_with_another_volume_driver(
            self, mock_get_cinder_volume, mock_get_role, mock_get_roles,
            mock_get_cinder_volumes, mock_update_cinder_volume):
        # Switching the driver (NETAPP_FCSAN -> FUJITSU_ETERNUS): since
        # FUJITSU_ETERNUS-1 is taken by another volume, the next free
        # slot FUJITSU_ETERNUS-2 is assigned.
        cinder_volume_id = '2'
        disk_meta = {
            'volume_driver': 'FUJITSU_ETERNUS',
            'root_pwd': 'aaaaaaa',
            'data_ips': ['192.168.1.2']
        }
        mock_get_cinder_volume.return_value = {
            'role_id': '1', 'volume_driver': 'NETAPP_FCSAN',
            'data_ips': '', 'root_pwd': 'bbbbbbb'}
        mock_get_role.return_value = {'cluster_id': '1'}
        mock_get_roles.return_value = [{'id': '1'}]
        mock_get_cinder_volumes.return_value = [
            {'id': '1', 'backend_index': 'FUJITSU_ETERNUS-1'},
            {'id': '2', 'backend_index': 'NETAPP_FCSAN-1'}]
        mock_update_cinder_volume.return_value = {}
        self.controller.cinder_volume_update(self.req,
                                             cinder_volume_id, disk_meta)
        self.assertEqual('FUJITSU_ETERNUS-2',
                         disk_meta.get('backend_index', None))