Merge "Implement create backup for Cinder-API-GW"
commit e82d93a038

trio2o/cinder_apigw/controllers/volume_backup.py (new file, 94 lines)
@@ -0,0 +1,94 @@
# Copyright 2016 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from pecan import expose
from pecan import request
from pecan import rest

from oslo_log import log as logging
from oslo_serialization import jsonutils

from trio2o.common import constants as cons
import trio2o.common.context as t_context
from trio2o.common import httpclient as hclient
from trio2o.common.i18n import _
from trio2o.common import utils

import trio2o.db.api as db_api

LOG = logging.getLogger(__name__)


class VolumeBackupController(rest.RestController):
    def __init__(self, tenant_id):
        self.tenant_id = tenant_id

    @expose(generic=True, template='json')
    def post(self, **kwargs):
        context = t_context.extract_context_from_environ()

        if 'backup' not in kwargs:
            return utils.format_cinder_error(
                400, _("Missing required element 'backup' in request body."))

        volume_id = kwargs['backup']['volume_id']
        volume_mappings = db_api.get_bottom_mappings_by_top_id(
            context, volume_id, cons.RT_VOLUME)
        if not volume_mappings:
            return utils.format_cinder_error(
                404, _('Volume %(volume_id)s could not be found.') % {
                    'volume_id': volume_id
                })

        pod_name = volume_mappings[0][0]['pod_name']

        t_release = cons.R_MITAKA
        b_release = cons.R_MITAKA

        s_ctx = hclient.get_pod_service_ctx(
            context,
            request.url,
            pod_name,
            s_type=cons.ST_CINDER)

        b_headers = hclient.convert_header(t_release,
                                           b_release,
                                           request.headers)
        t_vol = kwargs['backup']
        b_vol_req = hclient.convert_object(t_release, b_release, t_vol,
                                           res_type=cons.RT_BACKUP)
        b_body = jsonutils.dumps({'backup': b_vol_req})

        resp = hclient.forward_req(
            context,
            'POST',
            b_headers,
            s_ctx['b_url'],
            b_body)
        b_status = resp.status_code
        b_ret_body = jsonutils.loads(resp.content)
        resp.status = b_status
        if b_status == 200:
            if b_ret_body.get('backup') is not None:
                b_backup_ret = b_ret_body['backup']

                vol_ret = hclient.convert_object(b_release, t_release,
                                                 b_backup_ret,
                                                 res_type=cons.RT_BACKUP)

                return {'backup': vol_ret}

        return b_ret_body
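
For illustration only, and not part of the merged change: a minimal client-side sketch of the create-backup call this controller handles. The gateway address, tenant id and token are made-up placeholders, and the /v2/<tenant_id>/backups path assumes the gateway mounts this controller at the usual Cinder backups route; the request body mirrors the one used in the new unit test further down.

import requests

# Hypothetical values -- replace with a real gateway endpoint, tenant and token.
GATEWAY = 'http://gateway-host:19997'    # assumed Cinder API gateway address
TENANT_ID = 'test_project'               # tenant id, as in the unit test below
TOKEN = 'fake-token'                     # placeholder keystone token

body = {'backup': {'container': None,
                   'description': None,
                   'name': 'backup001',
                   'volume_id': 'TOP_VOLUME_UUID',  # id of an existing top volume
                   'incremental': True}}

resp = requests.post('%s/v2/%s/backups' % (GATEWAY, TENANT_ID),
                     json=body,
                     headers={'X-Auth-Token': TOKEN})
# On success the gateway forwards the request to the bottom Cinder and returns
# the converted 'backup' object from the bottom pod.
print(resp.status_code, resp.json())
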
@@ -26,6 +26,7 @@ echo "Running Trio2o functional test suite..."
# all test cases with following prefix
TESTCASES="(tempest.api.volume.test_volumes_list"
TESTCASES="$TESTCASES|tempest.api.volume.test_volumes_get"
TESTCASES="$TESTCASES|tempest.api.volume.admin.test_volumes_backup.VolumesBackupsV2Test.test_volume_backup_create_get_detailed_list_restore_delete"
# add new test cases like following line for volume_type test
# TESTCASES="$TESTCASES|tempest.api.volume.admin.test_volumes_type"
TESTCASES="$TESTCASES)"
@@ -160,7 +161,7 @@ ostestr --regex $TESTCASES
# tempest.api.volume.admin.test_volumes_actions.VolumesActionsV2Test.test_volume_reset_status[id-d063f96e-a2e0-4f34-8b8a-395c42de1845]
# tempest.api.volume.admin.test_volumes_backup.VolumesBackupsV1Test.test_volume_backup_create_get_detailed_list_restore_delete[id-a66eb488-8ee1-47d4-8e9f-575a095728c6]
# tempest.api.volume.admin.test_volumes_backup.VolumesBackupsV1Test.test_volume_backup_export_import[id-a99c54a1-dd80-4724-8a13-13bf58d4068d]
# tempest.api.volume.admin.test_volumes_backup.VolumesBackupsV2Test.test_volume_backup_create_get_detailed_list_restore_delete[id-a66eb488-8ee1-47d4-8e9f-575a095728c6]
# **DONE** tempest.api.volume.admin.test_volumes_backup.VolumesBackupsV2Test.test_volume_backup_create_get_detailed_list_restore_delete[id-a66eb488-8ee1-47d4-8e9f-575a095728c6]
# tempest.api.volume.admin.test_volumes_backup.VolumesBackupsV2Test.test_volume_backup_export_import[id-a99c54a1-dd80-4724-8a13-13bf58d4068d]
# tempest.api.volume.test_availability_zone.AvailabilityZoneV1TestJSON.test_get_availability_zone_list[id-01f1ae88-eba9-4c6b-a011-6f7ace06b725]
# tempest.api.volume.test_availability_zone.AvailabilityZoneV2TestJSON.test_get_availability_zone_list[id-01f1ae88-eba9-4c6b-a011-6f7ace06b725]
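
A side note on the filter above, not part of the change itself: the TESTCASES lines build one grouped alternation that ostestr uses to select matching test ids. A small Python sketch (pattern and test id copied from the lines above) showing that the alternative added by this commit selects the V2 backup scenario test:

import re

# Regex assembled by the TESTCASES lines in the first hunk above.
testcases = ('(tempest.api.volume.test_volumes_list'
             '|tempest.api.volume.test_volumes_get'
             '|tempest.api.volume.admin.test_volumes_backup.VolumesBackupsV2Test'
             '.test_volume_backup_create_get_detailed_list_restore_delete)')

# Full id of the test marked **DONE** in the second hunk above.
test_id = ('tempest.api.volume.admin.test_volumes_backup.VolumesBackupsV2Test'
           '.test_volume_backup_create_get_detailed_list_restore_delete'
           '[id-a66eb488-8ee1-47d4-8e9f-575a095728c6]')

print(bool(re.search(testcases, test_id)))  # True: the test is selected
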
@@ -0,0 +1,131 @@
# Copyright (c) 2015 Huawei Tech. Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from mock import patch
import pecan
import unittest

from oslo_serialization import jsonutils
from oslo_utils import uuidutils

from trio2o.cinder_apigw.controllers import volume_backup as backup
from trio2o.common import constants
from trio2o.common import context
from trio2o.common import httpclient as hclient
from trio2o.db import api
from trio2o.db import core
from trio2o.db import models


class FakeResponse(object):
    def __new__(cls, code=500):
        cls.status = code
        cls.status_code = code
        cls.content = None
        return super(FakeResponse, cls).__new__(cls)


class FakeRequest(object):
    def __new__(cls, *args, **kwargs):
        cls.url = '/lodfdfdf'
        cls.header = None
        return super(FakeRequest, cls).__new__(cls)


class VolumeBackupsTest(unittest.TestCase):
    def setUp(self):
        core.initialize()
        core.ModelBase.metadata.create_all(core.get_engine())
        self.context = context.Context()
        self.project_id = 'test_project'
        self.context.tenant = self.project_id
        self.controller = backup.VolumeBackupController(self.project_id)

    def _prepare_pod(self, bottom_pod_num=1):
        t_pod = {'pod_id': 't_pod_uuid', 'pod_name': 't_region',
                 'az_name': ''}
        api.create_pod(self.context, t_pod)
        b_pods = []
        if bottom_pod_num == 1:
            b_pod = {'pod_id': 'b_pod_uuid', 'pod_name': 'b_region',
                     'az_name': 'b_az'}
            api.create_pod(self.context, b_pod)
            b_pods.append(b_pod)
        else:
            for i in xrange(1, bottom_pod_num + 1):
                b_pod = {'pod_id': 'b_pod_%d_uuid' % i,
                         'pod_name': 'b_region_%d' % i,
                         'az_name': 'b_az_%d' % i}
                api.create_pod(self.context, b_pod)
                b_pods.append(b_pod)
        return t_pod, b_pods

    def _prepare_volume(self, pod):
        t_volume_id = uuidutils.generate_uuid()
        b_volume_id = t_volume_id
        with self.context.session.begin():
            core.create_resource(
                self.context, models.ResourceRouting,
                {'top_id': t_volume_id, 'bottom_id': b_volume_id,
                 'pod_id': pod['pod_id'], 'project_id': self.project_id,
                 'resource_type': constants.RT_VOLUME})
        return t_volume_id

    def _prepare_pod_service(self, pod_id, service):
        config_dict = {'service_id': uuidutils.generate_uuid(),
                       'pod_id': pod_id,
                       'service_type': service,
                       'service_url': 'fake_pod_service'}
        api.create_pod_service_configuration(self.context, config_dict)
        pass

    def _prepare_server(self, pod):
        t_server_id = uuidutils.generate_uuid()
        b_server_id = t_server_id
        with self.context.session.begin():
            core.create_resource(
                self.context, models.ResourceRouting,
                {'top_id': t_server_id, 'bottom_id': b_server_id,
                 'pod_id': pod['pod_id'], 'project_id': self.project_id,
                 'resource_type': constants.RT_SERVER})
        return t_server_id

    @patch.object(pecan, 'response', new=FakeResponse)
    @patch.object(jsonutils, 'loads')
    @patch.object(hclient, 'forward_req')
    @patch.object(pecan, 'request')
    @patch.object(context, 'extract_context_from_environ')
    def test_create_backup(self, mock_context, mock_request,
                           mock_forward_req, mock_loads):
        mock_context.return_value = self.context
        pecan.core.state = mock_request
        mock_forward_req.return_value = FakeResponse(200)
        fake_resp = {'fakeresp': 'fakeresp'}
        mock_loads.return_value = fake_resp
        t_pod, b_pods = self._prepare_pod()
        self._prepare_pod_service(b_pods[0]['pod_id'], constants.ST_CINDER)
        t_volume_id = self._prepare_volume(b_pods[0])

        body = {"backup": {"container": None,
                           "description": None,
                           "name": "backup001",
                           "volume_id": t_volume_id,
                           "incremental": True}}

        res = self.controller.post(**body)
        self.assertEqual(fake_resp, res)

    def tearDown(self):
        core.ModelBase.metadata.drop_all(core.get_engine())
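
A hedged note on running the new unit test on its own: the hunk header above does not show the test module's file name, so the dotted path below is an assumption; adjust it to wherever the file actually lives in the tree.

import unittest

# Assumed module path for the new test file -- adjust to the real location.
MODULE = 'trio2o.tests.unit.cinder_apigw.controllers.test_volume_backup'

suite = unittest.defaultTestLoader.loadTestsFromName(MODULE)
unittest.TextTestRunner(verbosity=2).run(suite)
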