Multi-Region Support
This is an initial attempt at supporting multiple regions. It should handle
the mechanics of deploying an instance/volume to a remote region. Additional
changes may be required to allow the guest agent on the instance to connect
back to the originating region.

Co-Authored-By: Petr Malik <pmalik@tesora.com>
Change-Id: I780de59dae5f90955139ab8393cf7d59ff3a21f6
This commit is contained in:
parent 8b98c51708
commit 3f93ff110b
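Note (illustrative only, not part of the diff): the change below threads an
optional region_name through every remote-client factory and falls back to the
locally configured region when the caller does not pass one. A minimal,
self-contained Python sketch of that pattern, with all names hypothetical:

# Region-aware client factory pattern used throughout this change.
# DEFAULT_REGION stands in for CONF.os_region_name; the dict stands in
# for a real service client object.
DEFAULT_REGION = 'RegionOne'


def make_client(service_type, region_name=None):
    # Fall back to the deployment's default region, mirroring
    # "region_name or CONF.os_region_name" in the diff below.
    return {'service': service_type, 'region': region_name or DEFAULT_REGION}


if __name__ == '__main__':
    print(make_client('compute'))                 # local region
    print(make_client('volumev2', 'RegionTwo'))   # explicit remote region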
@@ -1,5 +1,5 @@
 HTTP/1.1 200 OK
 Content-Type: application/json
-Content-Length: 694
+Content-Length: 717
 Date: Mon, 18 Mar 2013 19:09:17 GMT
 
@@ -31,6 +31,7 @@
            }
        ],
        "name": "backup_instance",
+       "region": "RegionOne",
        "status": "BUILD",
        "updated": "2014-10-30T12:30:00",
        "volume": {

@@ -1,5 +1,5 @@
 HTTP/1.1 200 OK
 Content-Type: application/json
-Content-Length: 697
+Content-Length: 720
 Date: Mon, 18 Mar 2013 19:09:17 GMT
 
@@ -31,6 +31,7 @@
            }
        ],
        "name": "json_rack_instance",
+       "region": "RegionOne",
        "status": "BUILD",
        "updated": "2014-10-30T12:30:00",
        "volume": {

@@ -1,5 +1,5 @@
 HTTP/1.1 200 OK
 Content-Type: application/json
-Content-Length: 712
+Content-Length: 735
 Date: Mon, 18 Mar 2013 19:09:17 GMT
 
@@ -31,6 +31,7 @@
            }
        ],
        "name": "json_rack_instance",
+       "region": "RegionOne",
        "status": "ACTIVE",
        "updated": "2014-10-30T12:30:00",
        "volume": {

@@ -1,5 +1,5 @@
 HTTP/1.1 200 OK
 Content-Type: application/json
-Content-Length: 1251
+Content-Length: 1297
 Date: Mon, 18 Mar 2013 19:09:17 GMT
 
@@ -31,6 +31,7 @@
            }
        ],
        "name": "The Third Instance",
+       "region": "RegionOne",
        "status": "ACTIVE",
        "volume": {
            "size": 2
@@ -67,6 +68,7 @@
            }
        ],
        "name": "json_rack_instance",
+       "region": "RegionOne",
        "status": "ACTIVE",
        "volume": {
            "size": 2

@@ -1,5 +1,5 @@
 HTTP/1.1 200 OK
 Content-Type: application/json
-Content-Length: 633
+Content-Length: 656
 Date: Mon, 18 Mar 2013 19:09:17 GMT
 
@@ -31,6 +31,7 @@
            }
        ],
        "name": "json_rack_instance",
+       "region": "RegionOne",
        "status": "ACTIVE",
        "volume": {
            "size": 2

@@ -1,5 +1,5 @@
 HTTP/1.1 200 OK
 Content-Type: application/json
-Content-Length: 1533
+Content-Length: 1556
 Date: Mon, 18 Mar 2013 19:09:17 GMT
 
@@ -36,6 +36,7 @@
            }
        ],
        "name": "json_rack_instance",
+       "region": "RegionOne",
        "root_enabled": "2014-10-30T12:30:00",
        "root_enabled_by": "3000",
        "server": {

@@ -1,5 +1,5 @@
 HTTP/1.1 200 OK
 Content-Type: application/json
-Content-Length: 1082
+Content-Length: 1105
 Date: Mon, 18 Mar 2013 19:09:17 GMT
 
@@ -34,6 +34,7 @@
            }
        ],
        "name": "json_rack_instance",
+       "region": "RegionOne",
        "server": {
            "deleted": false,
            "deleted_at": null,
releasenotes/notes/multi-region-cd8da560bfe00de5.yaml (new file, 3 lines)
@@ -0,0 +1,3 @@
+features:
+  - Adds a region property to the instance model and table. This is the
+    first step in multi-region support.
@@ -23,6 +23,7 @@ python-keystoneclient>=3.6.0 # Apache-2.0
 python-swiftclient>=2.2.0 # Apache-2.0
 python-designateclient>=1.5.0 # Apache-2.0
 python-neutronclient>=5.1.0 # Apache-2.0
+python-glanceclient>=2.5.0 # Apache-2.0
 iso8601>=0.1.11 # MIT
 jsonschema!=2.5.0,<3.0.0,>=2.0.0 # MIT
 Jinja2>=2.8 # BSD License (3 clause)
@@ -693,6 +693,30 @@
         "Instance of 'Table' has no 'create_column' member",
         "upgrade"
     ],
+    [
+        "trove/db/sqlalchemy/migrate_repo/versions/039_region.py",
+        "E1101",
+        "Instance of 'Table' has no 'create_column' member",
+        "upgrade"
+    ],
+    [
+        "trove/db/sqlalchemy/migrate_repo/versions/039_region.py",
+        "E1120",
+        "No value for argument 'dml' in method call",
+        "upgrade"
+    ],
+    [
+        "trove/db/sqlalchemy/migrate_repo/versions/039_region.py",
+        "no-member",
+        "Instance of 'Table' has no 'create_column' member",
+        "upgrade"
+    ],
+    [
+        "trove/db/sqlalchemy/migrate_repo/versions/039_region.py",
+        "no-value-for-parameter",
+        "No value for argument 'dml' in method call",
+        "upgrade"
+    ],
     [
         "trove/db/sqlalchemy/migration.py",
         "E0611",
@@ -341,24 +341,27 @@ def is_cluster_deleting(context, cluster_id):


 def validate_instance_flavors(context, instances,
                               volume_enabled, ephemeral_enabled):
-    """Load and validate flavors for given instance definitions."""
-    flavors = dict()
-    nova_client = remote.create_nova_client(context)
+    """Validate flavors for given instance definitions."""
+    nova_cli_cache = dict()
     for instance in instances:
+        region_name = instance.get('region_name')
         flavor_id = instance['flavor_id']
-        if flavor_id not in flavors:
         try:
+            if region_name in nova_cli_cache:
+                nova_client = nova_cli_cache[region_name]
+            else:
+                nova_client = remote.create_nova_client(
+                    context, region_name)
+                nova_cli_cache[region_name] = nova_client
+
             flavor = nova_client.flavors.get(flavor_id)
             if (not volume_enabled and
                     (ephemeral_enabled and flavor.ephemeral == 0)):
                 raise exception.LocalStorageNotSpecified(
                     flavor=flavor_id)
-            flavors[flavor_id] = flavor
         except nova_exceptions.NotFound:
             raise exception.FlavorNotFound(uuid=flavor_id)

-    return flavors


 def get_required_volume_size(instances, volume_enabled):
     """Calculate the total Trove volume size for given instances."""
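The rewritten validate_instance_flavors above creates one Nova client per
requested region and reuses it for every instance in that region. A
standalone sketch of that per-region cache (create_client is a stand-in for
remote.create_nova_client; the dicts are fake clients):

# Per-region client cache, mirroring nova_cli_cache in the hunk above.
def create_client(region_name):
    return {'region': region_name}  # placeholder for a real nova client


def clients_for(instances):
    cache = {}
    for instance in instances:
        region = instance.get('region_name')  # None means the default region
        if region not in cache:
            cache[region] = create_client(region)
        yield instance, cache[region]


if __name__ == '__main__':
    nodes = [{'flavor_id': 1, 'region_name': 'RegionOne'},
             {'flavor_id': 2, 'region_name': 'RegionTwo'},
             {'flavor_id': 3, 'region_name': 'RegionOne'}]
    for node, client in clients_for(nodes):
        print(node['flavor_id'], client)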
@@ -174,6 +174,7 @@ class ClusterController(wsgi.Controller):
                 "volume_type": volume_type,
                 "nics": nics,
                 "availability_zone": availability_zone,
+                'region_name': node.get('region_name'),
                 "modules": modules})

         locality = body['cluster'].get('locality')
@@ -250,6 +250,7 @@ cluster = {
                         "nics": nics,
                         "availability_zone": non_empty_string,
                         "modules": module_list,
+                        "region_name": non_empty_string
                     }
                 }
             },
@@ -287,7 +288,8 @@ cluster = {
                         "availability_zone": non_empty_string,
                         "modules": module_list,
                         "related_to": non_empty_string,
-                        "type": non_empty_string
+                        "type": non_empty_string,
+                        "region_name": non_empty_string
                     }
                 }
             }
@@ -349,6 +351,7 @@ instance = {
                 },
                 "nics": nics,
                 "modules": module_list,
+                "region_name": non_empty_string,
                 "locality": non_empty_string
             }
         }
@@ -92,8 +92,18 @@ common_opts = [
                help='Service type to use when searching catalog.'),
     cfg.StrOpt('swift_endpoint_type', default='publicURL',
                help='Service endpoint type to use when searching catalog.'),
+    cfg.URIOpt('glance_url', help='URL ending in ``AUTH_``.'),
+    cfg.StrOpt('glance_service_type', default='image',
+               help='Service type to use when searching catalog.'),
+    cfg.StrOpt('glance_endpoint_type', default='publicURL',
+               help='Service endpoint type to use when searching catalog.'),
     cfg.URIOpt('trove_auth_url', default='http://0.0.0.0:5000/v2.0',
                help='Trove authentication URL.'),
+    cfg.StrOpt('trove_url', help='URL without the tenant segment.'),
+    cfg.StrOpt('trove_service_type', default='database',
+               help='Service type to use when searching catalog.'),
+    cfg.StrOpt('trove_endpoint_type', default='publicURL',
+               help='Service endpoint type to use when searching catalog.'),
     cfg.IPOpt('host', default='0.0.0.0',
               help='Host to listen for RPC messages.'),
     cfg.IntOpt('report_interval', default=30,
@@ -328,11 +338,17 @@ common_opts = [
     cfg.StrOpt('remote_swift_client',
                default='trove.common.remote.swift_client',
                help='Client to send Swift calls to.'),
+    cfg.StrOpt('remote_trove_client',
+               default='trove.common.trove_remote.trove_client',
+               help='Client to send Trove calls to.'),
+    cfg.StrOpt('remote_glance_client',
+               default='trove.common.glance_remote.glance_client',
+               help='Client to send Glance calls to.'),
     cfg.StrOpt('exists_notification_transformer',
                help='Transformer for exists notifications.'),
     cfg.IntOpt('exists_notification_interval', default=3600,
                help='Seconds to wait between pushing events.'),
-    cfg.IntOpt('quota_notification_interval', default=3600,
+    cfg.IntOpt('quota_notification_interval',
                help='Seconds to wait between pushing events.'),
     cfg.DictOpt('notification_service_id',
                 default={'mysql': '2f3ff068-2bfb-4f70-9a9d-a6bb65bc084b',
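The new glance_*/trove_* options follow the existing *_url / *_service_type /
*_endpoint_type convention. A hedged sketch of how options of this shape are
registered and read with oslo.config (requires the oslo.config package; option
names copied from the hunk, group handling simplified):

from oslo_config import cfg

opts = [
    cfg.StrOpt('glance_service_type', default='image',
               help='Service type to use when searching catalog.'),
    cfg.StrOpt('glance_endpoint_type', default='publicURL',
               help='Service endpoint type to use when searching catalog.'),
    cfg.StrOpt('trove_service_type', default='database',
               help='Service type to use when searching catalog.'),
]

CONF = cfg.ConfigOpts()
CONF.register_opts(opts)
CONF([])  # parse an empty command line so the defaults become readable

print(CONF.glance_service_type, CONF.trove_service_type)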
trove/common/glance_remote.py (new file, 53 lines)
@@ -0,0 +1,53 @@
+# Copyright 2016 Tesora Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystoneauth1.identity import v3
+from keystoneauth1 import session as ka_session
+
+from oslo_utils.importutils import import_class
+
+from trove.common import cfg
+from trove.common.remote import get_endpoint
+from trove.common.remote import normalize_url
+
+from glanceclient import Client
+
+CONF = cfg.CONF
+
+
+def glance_client(context, region_name=None):
+
+    # We should allow glance to get the endpoint from the service
+    # catalog, but to do so we would need to be able to specify
+    # the endpoint_filter on the API calls, but glance
+    # doesn't currently allow that. As a result, we must
+    # specify the endpoint explicitly.
+    if CONF.glance_url:
+        endpoint_url = '%(url)s%(tenant)s' % {
+            'url': normalize_url(CONF.glance_url),
+            'tenant': context.tenant}
+    else:
+        endpoint_url = get_endpoint(
+            context.service_catalog, service_type=CONF.glance_service_type,
+            endpoint_region=region_name or CONF.os_region_name,
+            endpoint_type=CONF.glance_endpoint_type)
+
+    auth = v3.Token(CONF.trove_auth_url, context.auth_token)
+    session = ka_session.Session(auth=auth)
+
+    return Client('2', endpoint=endpoint_url, session=session)
+
+
+create_glance_client = import_class(CONF.remote_glance_client)
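glance_remote.py exposes its factory through import_class(CONF.remote_glance_client),
so a deployment can point the remote_glance_client option at its own callable. A
small standard-library-only sketch of that dotted-path indirection (the resolved
target here is just an arbitrary stdlib function, chosen only for illustration):

import importlib


def import_class(dotted_path):
    # Resolve 'package.module.attr' to the object it names, the same way
    # oslo_utils.importutils.import_class resolves the remote_*_client options.
    module_name, _, attr = dotted_path.rpartition('.')
    return getattr(importlib.import_module(module_name), attr)


urljoin = import_class('urllib.parse.urljoin')
print(urljoin('http://glance.example.com/', 'v2/images'))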
@@ -103,21 +103,28 @@ class NetworkRemoteModelBase(RemoteModelBase):
     network_driver = None

     @classmethod
-    def get_driver(cls, context):
+    def get_driver(cls, context, region_name):
         if not cls.network_driver:
             cls.network_driver = import_class(CONF.network_driver)
-        return cls.network_driver(context)
+        return cls.network_driver(context, region_name)


 class NovaRemoteModelBase(RemoteModelBase):

     @classmethod
-    def get_client(cls, context):
-        return remote.create_nova_client(context)
+    def get_client(cls, context, region_name):
+        return remote.create_nova_client(context, region_name)


 class SwiftRemoteModelBase(RemoteModelBase):

+    @classmethod
+    def get_client(cls, context, region_name):
+        return remote.create_swift_client(context, region_name)
+
+
+class CinderRemoteModelBase(RemoteModelBase):
+
     @classmethod
     def get_client(cls, context):
-        return remote.create_swift_client(context)
+        return remote.create_cinder_client(context)
@@ -436,7 +436,7 @@ class DBaaSInstanceCreate(DBaaSAPINotification):

     def required_start_traits(self):
         return ['name', 'flavor_id', 'datastore', 'datastore_version',
-                'image_id', 'availability_zone']
+                'image_id', 'availability_zone', 'region_name']

     def optional_start_traits(self):
         return ['databases', 'users', 'volume_size', 'restore_point',
@@ -789,3 +789,14 @@ class DBaaSInstanceUpgrade(DBaaSAPINotification):
     @abc.abstractmethod
     def required_start_traits(self):
         return ['instance_id', 'datastore_version_id']
+
+
+class DBaaSInstanceMigrate(DBaaSAPINotification):
+
+    @abc.abstractmethod
+    def event_type(self):
+        return 'migrate'
+
+    @abc.abstractmethod
+    def required_start_traits(self):
+        return ['host']
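The new DBaaSInstanceMigrate notification is wrapped in StartNotification around
instance.migrate(host) in the mgmt service change further down. A simplified,
self-contained sketch of that start/end wrapper (the real classes validate
required traits and emit real notification events):

import contextlib


@contextlib.contextmanager
def start_notification(event_type, **traits):
    # Emit a start event, run the wrapped action, then emit end (or error).
    print('start %s %s' % (event_type, traits))
    try:
        yield
    except Exception:
        print('error %s' % event_type)
        raise
    print('end %s' % event_type)


with start_notification('migrate', host='compute-2'):
    pass  # instance.migrate(host) would run here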
@@ -87,7 +87,7 @@ def guest_client(context, id, manager=None):
     return clazz(context, id)


-def nova_client(context):
+def nova_client(context, region_name=None):
     if CONF.nova_compute_url:
         url = '%(nova_url)s%(tenant)s' % {
             'nova_url': normalize_url(CONF.nova_compute_url),
@@ -95,7 +95,7 @@ def nova_client(context):
     else:
         url = get_endpoint(context.service_catalog,
                            service_type=CONF.nova_compute_service_type,
-                           endpoint_region=CONF.os_region_name,
+                           endpoint_region=region_name or CONF.os_region_name,
                            endpoint_type=CONF.nova_compute_endpoint_type)

     client = Client(CONF.nova_client_version, context.user, context.auth_token,
@@ -116,7 +116,7 @@ def create_admin_nova_client(context):
     return client


-def cinder_client(context):
+def cinder_client(context, region_name=None):
     if CONF.cinder_url:
         url = '%(cinder_url)s%(tenant)s' % {
             'cinder_url': normalize_url(CONF.cinder_url),
@@ -124,7 +124,7 @@ def cinder_client(context):
     else:
         url = get_endpoint(context.service_catalog,
                            service_type=CONF.cinder_service_type,
-                           endpoint_region=CONF.os_region_name,
+                           endpoint_region=region_name or CONF.os_region_name,
                            endpoint_type=CONF.cinder_endpoint_type)

     client = CinderClient.Client(context.user, context.auth_token,
@@ -135,7 +135,7 @@ def cinder_client(context):
     return client


-def heat_client(context):
+def heat_client(context, region_name=None):
     if CONF.heat_url:
         url = '%(heat_url)s%(tenant)s' % {
             'heat_url': normalize_url(CONF.heat_url),
@@ -143,7 +143,7 @@ def heat_client(context):
     else:
         url = get_endpoint(context.service_catalog,
                            service_type=CONF.heat_service_type,
-                           endpoint_region=CONF.os_region_name,
+                           endpoint_region=region_name or CONF.os_region_name,
                            endpoint_type=CONF.heat_endpoint_type)

     client = HeatClient.Client(token=context.auth_token,
@@ -152,7 +152,7 @@ def heat_client(context):
     return client


-def swift_client(context):
+def swift_client(context, region_name=None):
     if CONF.swift_url:
         # swift_url has a different format so doesn't need to be normalized
         url = '%(swift_url)s%(tenant)s' % {'swift_url': CONF.swift_url,
@@ -160,7 +160,7 @@ def swift_client(context):
     else:
         url = get_endpoint(context.service_catalog,
                            service_type=CONF.swift_service_type,
-                           endpoint_region=CONF.os_region_name,
+                           endpoint_region=region_name or CONF.os_region_name,
                            endpoint_type=CONF.swift_endpoint_type)

     client = Connection(preauthurl=url,
@@ -170,7 +170,7 @@ def swift_client(context):
     return client


-def neutron_client(context):
+def neutron_client(context, region_name=None):
     from neutronclient.v2_0 import client as NeutronClient
     if CONF.neutron_url:
         # neutron endpoint url / publicURL does not include tenant segment
@@ -178,7 +178,7 @@ def neutron_client(context):
     else:
         url = get_endpoint(context.service_catalog,
                            service_type=CONF.neutron_service_type,
-                           endpoint_region=CONF.os_region_name,
+                           endpoint_region=region_name or CONF.os_region_name,
                            endpoint_type=CONF.neutron_endpoint_type)

     client = NeutronClient.Client(token=context.auth_token,
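Every client factory above now resolves its endpoint with
endpoint_region=region_name or CONF.os_region_name. A hedged sketch of such a
lookup against a simplified service-catalog shape (the real get_endpoint
handles more catalog formats; the URLs below are made up):

# Pick an endpoint by service type and region from a simplified catalog.
def get_endpoint(catalog, service_type, region, endpoint_type='publicURL'):
    for service in catalog:
        if service['type'] != service_type:
            continue
        for endpoint in service['endpoints']:
            if endpoint['region'] == region:
                return endpoint[endpoint_type]
    raise LookupError('no %s endpoint in %s' % (service_type, region))


catalog = [
    {'type': 'compute', 'endpoints': [
        {'region': 'RegionOne', 'publicURL': 'http://nova.r1.example.com'},
        {'region': 'RegionTwo', 'publicURL': 'http://nova.r2.example.com'},
    ]},
]

print(get_endpoint(catalog, 'compute', 'RegionTwo'))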
@@ -52,7 +52,7 @@ remote_neutron_client = \
 PROXY_AUTH_URL = CONF.trove_auth_url


-def nova_client_trove_admin(context=None):
+def nova_client_trove_admin(context, region_name=None, compute_url=None):
     """
     Returns a nova client object with the trove admin credentials
     :param context: original context from user request
@@ -60,16 +60,19 @@ def nova_client_trove_admin(context=None):
     :return novaclient: novaclient with trove admin credentials
     :rtype: novaclient.v1_1.client.Client
     """
+
+    compute_url = compute_url or CONF.nova_compute_url
+
     client = NovaClient(CONF.nova_proxy_admin_user,
                         CONF.nova_proxy_admin_pass,
                         CONF.nova_proxy_admin_tenant_name,
                         auth_url=PROXY_AUTH_URL,
                         service_type=CONF.nova_compute_service_type,
-                        region_name=CONF.os_region_name)
+                        region_name=region_name or CONF.os_region_name)

-    if CONF.nova_compute_url and CONF.nova_proxy_admin_tenant_id:
+    if compute_url and CONF.nova_proxy_admin_tenant_id:
         client.client.management_url = "%s/%s/" % (
-            normalize_url(CONF.nova_compute_url),
+            normalize_url(compute_url),
             CONF.nova_proxy_admin_tenant_id)

     return client
@@ -155,8 +155,9 @@ class CassandraCluster(models.Cluster):
                 availability_zone=instance_az,
                 configuration_id=None,
                 cluster_config=member_config,
+                modules=instance.get('modules'),
                 locality=locality,
-                modules=instance.get('modules'))
+                region_name=instance.get('region_name'))

             new_instances.append(new_instance)
@@ -120,8 +120,9 @@ class GaleraCommonCluster(cluster_models.Cluster):
                              nics=instance.get('nics', None),
                              configuration_id=None,
                              cluster_config=member_config,
+                             modules=instance.get('modules'),
                              locality=locality,
-                             modules=instance.get('modules')
+                             region_name=instance.get('region_name')
                              )
             for instance in instances]
@@ -93,6 +93,9 @@ class MongoDbCluster(models.Cluster):
         azs = [instance.get('availability_zone', None)
                for instance in instances]

+        regions = [instance.get('region_name', None)
+                   for instance in instances]
+
         db_info = models.DBCluster.create(
             name=name, tenant_id=context.tenant,
             datastore_version_id=datastore_version.id,
@@ -129,8 +132,9 @@ class MongoDbCluster(models.Cluster):
                                     nics=nics[i],
                                     configuration_id=None,
                                     cluster_config=member_config,
+                                    modules=instances[i].get('modules'),
                                     locality=locality,
-                                    modules=instances[i].get('modules'))
+                                    region_name=regions[i])

         for i in range(1, num_configsvr + 1):
             instance_name = "%s-%s-%s" % (name, "configsvr", str(i))
@@ -144,7 +148,8 @@ class MongoDbCluster(models.Cluster):
                                     nics=None,
                                     configuration_id=None,
                                     cluster_config=configsvr_config,
-                                    locality=locality)
+                                    locality=locality,
+                                    region_name=regions[i])

         for i in range(1, num_mongos + 1):
             instance_name = "%s-%s-%s" % (name, "mongos", str(i))
@@ -158,7 +163,8 @@ class MongoDbCluster(models.Cluster):
                                     nics=None,
                                     configuration_id=None,
                                     cluster_config=mongos_config,
-                                    locality=locality)
+                                    locality=locality,
+                                    region_name=regions[i])

         task_api.load(context, datastore_version.manager).create_cluster(
             db_info.id)
@@ -88,8 +88,10 @@ class RedisCluster(models.Cluster):
                              cluster_config={
                                  "id": db_info.id,
                                  "instance_type": "member"},
+                             modules=instance.get('modules'),
                              locality=locality,
-                             modules=instance.get('modules')
+                             region_name=instance.get(
+                                 'region_name')
                              )
             for instance in instances]
@@ -103,6 +103,9 @@ class VerticaCluster(models.Cluster):
         azs = [instance.get('availability_zone', None)
                for instance in instances]

+        regions = [instance.get('region_name', None)
+                   for instance in instances]
+
         # Creating member instances
         minstances = []
         for i in range(0, num_instances):
@@ -119,7 +122,8 @@ class VerticaCluster(models.Cluster):
                 datastore_version, volume_size, None,
                 nics=nics[i], availability_zone=azs[i],
                 configuration_id=None, cluster_config=member_config,
-                locality=locality, modules=instances[i].get('modules'))
+                modules=instances[i].get('modules'), locality=locality,
+                region_name=regions[i])
                 )
         return minstances
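Each cluster strategy above now builds a per-node region list next to the
availability-zone list and passes regions[i] into Instance.create. A tiny
sketch of how those parallel lists line up (node data is illustrative):

nodes = [
    {'flavor_id': 7, 'availability_zone': 'az1', 'region_name': 'RegionOne'},
    {'flavor_id': 7, 'availability_zone': 'az2'},  # region defaults to None
]

azs = [node.get('availability_zone') for node in nodes]
regions = [node.get('region_name') for node in nodes]

for i, node in enumerate(nodes):
    print('node %d -> az=%s region=%s' % (i, azs[i], regions[i]))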
trove/common/trove_remote.py (new file, 56 lines)
@@ -0,0 +1,56 @@
+# Copyright 2016 Tesora Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_utils.importutils import import_class
+
+from trove.common import cfg
+from trove.common.remote import get_endpoint
+from trove.common.remote import normalize_url
+
+from troveclient.v1 import client as TroveClient
+
+CONF = cfg.CONF
+
+PROXY_AUTH_URL = CONF.trove_auth_url
+
+
+"""
+NOTE(mwj, Apr 2016):
+This module is separated from remote.py because remote.py is used
+on the Trove guest, but the trove client is not installed on the guest,
+so the imports here would fail.
+"""
+
+
+def trove_client(context, region_name=None):
+    if CONF.trove_url:
+        url = '%(url)s%(tenant)s' % {
+            'url': normalize_url(CONF.trove_url),
+            'tenant': context.tenant}
+    else:
+        url = get_endpoint(context.service_catalog,
+                           service_type=CONF.trove_service_type,
+                           endpoint_region=region_name or CONF.os_region_name,
+                           endpoint_type=CONF.trove_endpoint_type)
+
+    client = TroveClient.Client(context.user, context.auth_token,
+                                project_id=context.tenant,
+                                auth_url=PROXY_AUTH_URL)
+    client.client.auth_token = context.auth_token
+    client.client.management_url = url
+    return client
+
+
+create_trove_client = import_class(CONF.remote_trove_client)
trove/db/sqlalchemy/migrate_repo/versions/039_region.py (new file, 35 lines)
@@ -0,0 +1,35 @@
+# Copyright 2016 Tesora Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_log import log as logging
+from sqlalchemy.schema import Column
+from sqlalchemy.schema import MetaData
+
+from trove.common import cfg
+from trove.db.sqlalchemy.migrate_repo.schema import String
+from trove.db.sqlalchemy.migrate_repo.schema import Table
+
+
+CONF = cfg.CONF
+logger = logging.getLogger('trove.db.sqlalchemy.migrate_repo.schema')
+
+meta = MetaData()
+
+
+def upgrade(migrate_engine):
+    meta.bind = migrate_engine
+    instances = Table('instances', meta, autoload=True)
+    instances.create_column(Column('region_id', String(255)))
+    instances.update().values(region_id=CONF.os_region_name).execute()
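The 039_region migration adds a nullable region_id column to the instances
table and backfills it with the configured local region. A hedged, standalone
SQLAlchemy sketch of the equivalent step against an in-memory SQLite database
(the table is reduced to what the migration touches):

import sqlalchemy as sa

engine = sa.create_engine('sqlite://')
default_region = 'RegionOne'  # stands in for CONF.os_region_name

with engine.begin() as conn:
    conn.execute(sa.text('CREATE TABLE instances (id TEXT PRIMARY KEY)'))
    conn.execute(sa.text("INSERT INTO instances (id) VALUES ('inst-1')"))

    # Add the new column, then backfill existing rows with the local region.
    conn.execute(sa.text(
        'ALTER TABLE instances ADD COLUMN region_id VARCHAR(255)'))
    conn.execute(sa.text('UPDATE instances SET region_id = :r')
                 .bindparams(r=default_region))

    print(conn.execute(sa.text('SELECT id, region_id FROM instances'))
          .fetchall())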
@@ -33,7 +33,7 @@ CONF = cfg.CONF
 def load_mgmt_instances(context, deleted=None, client=None,
                         include_clustered=None):
     if not client:
-        client = remote.create_nova_client(context)
+        client = remote.create_nova_client(context, CONF.os_region_name)
     try:
         mgmt_servers = client.rdservers.list()
     except AttributeError:
@@ -56,7 +56,7 @@ def load_mgmt_instance(cls, context, id, include_deleted):
     try:
         instance = load_instance(cls, context, id, needs_server=True,
                                  include_deleted=include_deleted)
-        client = remote.create_nova_client(context)
+        client = remote.create_nova_client(context, CONF.os_region_name)
         try:
             server = client.rdservers.get(instance.server_id)
         except AttributeError:
@@ -169,7 +169,7 @@ def _load_servers(instances, find_server):
             server = find_server(db.id, db.compute_instance_id)
             instance.server = server
         except Exception as ex:
-            LOG.error(ex)
+            LOG.exception(ex)
     return instances

@@ -22,6 +22,8 @@ import trove.common.apischema as apischema
 from trove.common.auth import admin_context
 from trove.common import exception
 from trove.common.i18n import _
+from trove.common import notification
+from trove.common.notification import StartNotification
 from trove.common import wsgi
 from trove.extensions.mgmt.instances import models
 from trove.extensions.mgmt.instances import views
@@ -63,7 +65,7 @@ class MgmtInstanceController(InstanceController):
             instances = models.load_mgmt_instances(
                 context, deleted=deleted, include_clustered=include_clustered)
         except nova_exceptions.ClientException as e:
-            LOG.error(e)
+            LOG.exception(e)
             return wsgi.Result(str(e), 403)

         view_cls = views.MgmtInstancesView
@@ -118,28 +120,32 @@ class MgmtInstanceController(InstanceController):
             raise exception.BadRequest(msg)

         if selected_action:
-            return selected_action(context, instance, body)
+            return selected_action(context, instance, req, body)
         else:
             raise exception.BadRequest(_("Invalid request body."))

-    def _action_stop(self, context, instance, body):
+    def _action_stop(self, context, instance, req, body):
         LOG.debug("Stopping MySQL on instance %s." % instance.id)
         instance.stop_db()
         return wsgi.Result(None, 202)

-    def _action_reboot(self, context, instance, body):
+    def _action_reboot(self, context, instance, req, body):
         LOG.debug("Rebooting instance %s." % instance.id)
         instance.reboot()
         return wsgi.Result(None, 202)

-    def _action_migrate(self, context, instance, body):
+    def _action_migrate(self, context, instance, req, body):
         LOG.debug("Migrating instance %s." % instance.id)
         LOG.debug("body['migrate']= %s" % body['migrate'])
         host = body['migrate'].get('host', None)
+
+        context.notification = notification.DBaaSInstanceMigrate(context,
+                                                                 request=req)
+        with StartNotification(context, host=host):
             instance.migrate(host)
         return wsgi.Result(None, 202)

-    def _action_reset_task_status(self, context, instance, body):
+    def _action_reset_task_status(self, context, instance, req, body):
         LOG.debug("Setting Task-Status to NONE on instance %s." %
                   instance.id)
         instance.reset_task_status()
@@ -163,7 +169,7 @@ class MgmtInstanceController(InstanceController):
         try:
             instance_models.Instance.load(context=context, id=id)
         except exception.TroveError as e:
-            LOG.error(e)
+            LOG.exception(e)
             return wsgi.Result(str(e), 404)
         rhv = views.RootHistoryView(id)
         reh = mysql_models.RootHistory.load(context=context, instance_id=id)
@@ -41,8 +41,8 @@ class StorageDevice(object):
 class StorageDevices(object):

     @staticmethod
-    def load(context):
-        client = create_cinder_client(context)
+    def load(context, region_name):
+        client = create_cinder_client(context, region_name)
         rdstorages = client.rdstorage.list()
         for rdstorage in rdstorages:
             LOG.debug("rdstorage=" + str(rdstorage))
|
|||||||
from oslo_log import log as logging
|
from oslo_log import log as logging
|
||||||
|
|
||||||
from trove.common.auth import admin_context
|
from trove.common.auth import admin_context
|
||||||
|
from trove.common import cfg
|
||||||
from trove.common.i18n import _
|
from trove.common.i18n import _
|
||||||
from trove.common import wsgi
|
from trove.common import wsgi
|
||||||
from trove.extensions.mgmt.volume import models
|
from trove.extensions.mgmt.volume import models
|
||||||
from trove.extensions.mgmt.volume import views
|
from trove.extensions.mgmt.volume import views
|
||||||
|
|
||||||
|
CONF = cfg.CONF
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
@ -34,5 +36,5 @@ class StorageController(wsgi.Controller):
|
|||||||
LOG.info(_("req : '%s'\n\n") % req)
|
LOG.info(_("req : '%s'\n\n") % req)
|
||||||
LOG.info(_("Indexing storage info for tenant '%s'") % tenant_id)
|
LOG.info(_("Indexing storage info for tenant '%s'") % tenant_id)
|
||||||
context = req.environ[wsgi.CONTEXT_KEY]
|
context = req.environ[wsgi.CONTEXT_KEY]
|
||||||
storages = models.StorageDevices.load(context)
|
storages = models.StorageDevices.load(context, CONF.os_region_name)
|
||||||
return wsgi.Result(views.StoragesView(storages).data(), 200)
|
return wsgi.Result(views.StoragesView(storages).data(), 200)
|
||||||
|
@ -49,11 +49,10 @@ class SecurityGroup(DatabaseModelBase):
|
|||||||
.get_instance_id_by_security_group_id(self.id)
|
.get_instance_id_by_security_group_id(self.id)
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def create_sec_group(cls, name, description, context):
|
def create_sec_group(cls, name, description, context, region_name):
|
||||||
try:
|
try:
|
||||||
remote_sec_group = RemoteSecurityGroup.create(name,
|
remote_sec_group = RemoteSecurityGroup.create(
|
||||||
description,
|
name, description, context, region_name)
|
||||||
context)
|
|
||||||
|
|
||||||
if not remote_sec_group:
|
if not remote_sec_group:
|
||||||
raise exception.SecurityGroupCreationError(
|
raise exception.SecurityGroupCreationError(
|
||||||
@ -71,11 +70,12 @@ class SecurityGroup(DatabaseModelBase):
|
|||||||
raise
|
raise
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def create_for_instance(cls, instance_id, context):
|
def create_for_instance(cls, instance_id, context, region_name):
|
||||||
# Create a new security group
|
# Create a new security group
|
||||||
name = "%s_%s" % (CONF.trove_security_group_name_prefix, instance_id)
|
name = "%s_%s" % (CONF.trove_security_group_name_prefix, instance_id)
|
||||||
description = _("Security Group for %s") % instance_id
|
description = _("Security Group for %s") % instance_id
|
||||||
sec_group = cls.create_sec_group(name, description, context)
|
sec_group = cls.create_sec_group(name, description, context,
|
||||||
|
region_name)
|
||||||
|
|
||||||
# Currently this locked down by default, since we don't create any
|
# Currently this locked down by default, since we don't create any
|
||||||
# default security group rules for the security group.
|
# default security group rules for the security group.
|
||||||
@ -101,14 +101,14 @@ class SecurityGroup(DatabaseModelBase):
|
|||||||
return SecurityGroupRule.find_all(group_id=self.id,
|
return SecurityGroupRule.find_all(group_id=self.id,
|
||||||
deleted=False)
|
deleted=False)
|
||||||
|
|
||||||
def delete(self, context):
|
def delete(self, context, region_name):
|
||||||
try:
|
try:
|
||||||
sec_group_rules = self.get_rules()
|
sec_group_rules = self.get_rules()
|
||||||
if sec_group_rules:
|
if sec_group_rules:
|
||||||
for rule in sec_group_rules:
|
for rule in sec_group_rules:
|
||||||
rule.delete(context)
|
rule.delete(context, region_name)
|
||||||
|
|
||||||
RemoteSecurityGroup.delete(self.id, context)
|
RemoteSecurityGroup.delete(self.id, context, region_name)
|
||||||
super(SecurityGroup, self).delete()
|
super(SecurityGroup, self).delete()
|
||||||
|
|
||||||
except exception.TroveError:
|
except exception.TroveError:
|
||||||
@ -116,7 +116,7 @@ class SecurityGroup(DatabaseModelBase):
|
|||||||
raise exception.TroveError("Failed to delete Security Group")
|
raise exception.TroveError("Failed to delete Security Group")
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def delete_for_instance(cls, instance_id, context):
|
def delete_for_instance(cls, instance_id, context, region_name):
|
||||||
try:
|
try:
|
||||||
association = SecurityGroupInstanceAssociation.find_by(
|
association = SecurityGroupInstanceAssociation.find_by(
|
||||||
instance_id=instance_id,
|
instance_id=instance_id,
|
||||||
@ -124,7 +124,7 @@ class SecurityGroup(DatabaseModelBase):
|
|||||||
if association:
|
if association:
|
||||||
sec_group = association.get_security_group()
|
sec_group = association.get_security_group()
|
||||||
if sec_group:
|
if sec_group:
|
||||||
sec_group.delete(context)
|
sec_group.delete(context, region_name)
|
||||||
association.delete()
|
association.delete()
|
||||||
except (exception.ModelNotFoundError,
|
except (exception.ModelNotFoundError,
|
||||||
exception.TroveError):
|
exception.TroveError):
|
||||||
@ -140,7 +140,7 @@ class SecurityGroupRule(DatabaseModelBase):
|
|||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def create_sec_group_rule(cls, sec_group, protocol, from_port,
|
def create_sec_group_rule(cls, sec_group, protocol, from_port,
|
||||||
to_port, cidr, context):
|
to_port, cidr, context, region_name):
|
||||||
try:
|
try:
|
||||||
remote_rule_id = RemoteSecurityGroup.add_rule(
|
remote_rule_id = RemoteSecurityGroup.add_rule(
|
||||||
sec_group_id=sec_group['id'],
|
sec_group_id=sec_group['id'],
|
||||||
@ -148,7 +148,8 @@ class SecurityGroupRule(DatabaseModelBase):
|
|||||||
from_port=from_port,
|
from_port=from_port,
|
||||||
to_port=to_port,
|
to_port=to_port,
|
||||||
cidr=cidr,
|
cidr=cidr,
|
||||||
context=context)
|
context=context,
|
||||||
|
region_name=region_name)
|
||||||
|
|
||||||
if not remote_rule_id:
|
if not remote_rule_id:
|
||||||
raise exception.SecurityGroupRuleCreationError(
|
raise exception.SecurityGroupRuleCreationError(
|
||||||
@ -172,10 +173,10 @@ class SecurityGroupRule(DatabaseModelBase):
|
|||||||
tenant_id=tenant_id,
|
tenant_id=tenant_id,
|
||||||
deleted=False)
|
deleted=False)
|
||||||
|
|
||||||
def delete(self, context):
|
def delete(self, context, region_name):
|
||||||
try:
|
try:
|
||||||
# Delete Remote Security Group Rule
|
# Delete Remote Security Group Rule
|
||||||
RemoteSecurityGroup.delete_rule(self.id, context)
|
RemoteSecurityGroup.delete_rule(self.id, context, region_name)
|
||||||
super(SecurityGroupRule, self).delete()
|
super(SecurityGroupRule, self).delete()
|
||||||
except exception.TroveError:
|
except exception.TroveError:
|
||||||
LOG.exception(_('Failed to delete security group.'))
|
LOG.exception(_('Failed to delete security group.'))
|
||||||
@ -210,42 +211,44 @@ class RemoteSecurityGroup(NetworkRemoteModelBase):
|
|||||||
|
|
||||||
_data_fields = ['id', 'name', 'description', 'rules']
|
_data_fields = ['id', 'name', 'description', 'rules']
|
||||||
|
|
||||||
def __init__(self, security_group=None, id=None, context=None):
|
def __init__(self, security_group=None, id=None, context=None,
|
||||||
|
region_name=None):
|
||||||
if id is None and security_group is None:
|
if id is None and security_group is None:
|
||||||
msg = _("Security Group does not have id defined!")
|
msg = _("Security Group does not have id defined!")
|
||||||
raise exception.InvalidModelError(msg)
|
raise exception.InvalidModelError(msg)
|
||||||
elif security_group is None:
|
elif security_group is None:
|
||||||
driver = self.get_driver(context)
|
driver = self.get_driver(context,
|
||||||
|
region_name or CONF.os_region_name)
|
||||||
self._data_object = driver.get_sec_group_by_id(group_id=id)
|
self._data_object = driver.get_sec_group_by_id(group_id=id)
|
||||||
else:
|
else:
|
||||||
self._data_object = security_group
|
self._data_object = security_group
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def create(cls, name, description, context):
|
def create(cls, name, description, context, region_name):
|
||||||
"""Creates a new Security Group."""
|
"""Creates a new Security Group."""
|
||||||
driver = cls.get_driver(context)
|
driver = cls.get_driver(context, region_name)
|
||||||
sec_group = driver.create_security_group(
|
sec_group = driver.create_security_group(
|
||||||
name=name, description=description)
|
name=name, description=description)
|
||||||
return RemoteSecurityGroup(security_group=sec_group)
|
return RemoteSecurityGroup(security_group=sec_group)
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def delete(cls, sec_group_id, context):
|
def delete(cls, sec_group_id, context, region_name):
|
||||||
"""Deletes a Security Group."""
|
"""Deletes a Security Group."""
|
||||||
driver = cls.get_driver(context)
|
driver = cls.get_driver(context, region_name)
|
||||||
driver.delete_security_group(sec_group_id)
|
driver.delete_security_group(sec_group_id)
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def add_rule(cls, sec_group_id, protocol, from_port,
|
def add_rule(cls, sec_group_id, protocol, from_port,
|
||||||
to_port, cidr, context):
|
to_port, cidr, context, region_name):
|
||||||
"""Adds a new rule to an existing security group."""
|
"""Adds a new rule to an existing security group."""
|
||||||
driver = cls.get_driver(context)
|
driver = cls.get_driver(context, region_name)
|
||||||
sec_group_rule = driver.add_security_group_rule(
|
sec_group_rule = driver.add_security_group_rule(
|
||||||
sec_group_id, protocol, from_port, to_port, cidr)
|
sec_group_id, protocol, from_port, to_port, cidr)
|
||||||
|
|
||||||
return sec_group_rule.id
|
return sec_group_rule.id
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def delete_rule(cls, sec_group_rule_id, context):
|
def delete_rule(cls, sec_group_rule_id, context, region_name):
|
||||||
"""Deletes a rule from an existing security group."""
|
"""Deletes a rule from an existing security group."""
|
||||||
driver = cls.get_driver(context)
|
driver = cls.get_driver(context, region_name)
|
||||||
driver.delete_security_group_rule(sec_group_rule_id)
|
driver.delete_security_group_rule(sec_group_rule_id)
|
||||||
|
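The security-group model changes above are mostly parameter threading: each
helper now accepts region_name and hands it down to get_driver. A condensed,
self-contained illustration of that threading (names are hypothetical, not
Trove's API):

class FakeDriver(object):
    def __init__(self, region_name):
        self.region_name = region_name

    def create_security_group(self, name):
        return {'name': name, 'region': self.region_name}


class RemoteGroup(object):
    @classmethod
    def get_driver(cls, region_name):
        return FakeDriver(region_name)

    @classmethod
    def create(cls, name, region_name):
        # The region chosen by the caller travels all the way to the driver.
        return cls.get_driver(region_name).create_security_group(name)


print(RemoteGroup.create('trove_sg_inst-1', 'RegionTwo'))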
@@ -77,7 +77,7 @@ class SecurityGroupRuleController(wsgi.Controller):
                 "exist or does not belong to tenant %s") % tenant_id)
             raise exception.Forbidden("Unauthorized")

-        sec_group_rule.delete(context)
+        sec_group_rule.delete(context, CONF.os_region_name)
         sec_group.save()
         return wsgi.Result(None, 204)

@@ -106,7 +106,8 @@ class SecurityGroupRuleController(wsgi.Controller):
                 from_, to_ = utils.gen_ports(port_or_range)
                 rule = models.SecurityGroupRule.create_sec_group_rule(
                     sec_group, protocol, int(from_), int(to_),
-                    body['security_group_rule']['cidr'], context)
+                    body['security_group_rule']['cidr'], context,
+                    CONF.os_region_name)
                 rules.append(rule)
             except (ValueError, AttributeError) as e:
                 raise exception.BadRequest(msg=str(e))
@ -27,6 +27,7 @@ from oslo_log import log as logging
|
|||||||
from trove.backup.models import Backup
|
from trove.backup.models import Backup
|
||||||
from trove.common import cfg
|
from trove.common import cfg
|
||||||
from trove.common import exception
|
from trove.common import exception
|
||||||
|
from trove.common.glance_remote import create_glance_client
|
||||||
from trove.common.i18n import _, _LE, _LI, _LW
|
from trove.common.i18n import _, _LE, _LI, _LW
|
||||||
import trove.common.instance as tr_instance
|
import trove.common.instance as tr_instance
|
||||||
from trove.common.notification import StartNotification
|
from trove.common.notification import StartNotification
|
||||||
@ -36,6 +37,7 @@ from trove.common.remote import create_guest_client
|
|||||||
from trove.common.remote import create_nova_client
|
from trove.common.remote import create_nova_client
|
||||||
from trove.common import server_group as srv_grp
|
from trove.common import server_group as srv_grp
|
||||||
from trove.common import template
|
from trove.common import template
|
||||||
|
from trove.common.trove_remote import create_trove_client
|
||||||
from trove.common import utils
|
from trove.common import utils
|
||||||
from trove.configuration.models import Configuration
|
from trove.configuration.models import Configuration
|
||||||
from trove.datastore import models as datastore_models
|
from trove.datastore import models as datastore_models
|
||||||
@ -62,7 +64,7 @@ def filter_ips(ips, white_list_regex, black_list_regex):
|
|||||||
and not re.search(black_list_regex, ip)]
|
and not re.search(black_list_regex, ip)]
|
||||||
|
|
||||||
|
|
||||||
def load_server(context, instance_id, server_id):
|
def load_server(context, instance_id, server_id, region_name):
|
||||||
"""
|
"""
|
||||||
Loads a server or raises an exception.
|
Loads a server or raises an exception.
|
||||||
:param context: request context used to access nova
|
:param context: request context used to access nova
|
||||||
@ -74,7 +76,7 @@ def load_server(context, instance_id, server_id):
|
|||||||
:type server_id: unicode
|
:type server_id: unicode
|
||||||
:rtype: novaclient.v2.servers.Server
|
:rtype: novaclient.v2.servers.Server
|
||||||
"""
|
"""
|
||||||
client = create_nova_client(context)
|
client = create_nova_client(context, region_name=region_name)
|
||||||
try:
|
try:
|
||||||
server = client.servers.get(server_id)
|
server = client.servers.get(server_id)
|
||||||
except nova_exceptions.NotFound:
|
except nova_exceptions.NotFound:
|
||||||
@ -120,7 +122,7 @@ def load_simple_instance_server_status(context, db_info):
|
|||||||
db_info.server_status = "BUILD"
|
db_info.server_status = "BUILD"
|
||||||
db_info.addresses = {}
|
db_info.addresses = {}
|
||||||
else:
|
else:
|
||||||
client = create_nova_client(context)
|
client = create_nova_client(context, db_info.region_id)
|
||||||
try:
|
try:
|
||||||
server = client.servers.get(db_info.compute_instance_id)
|
server = client.servers.get(db_info.compute_instance_id)
|
||||||
db_info.server_status = server.status
|
db_info.server_status = server.status
|
||||||
@@ -427,6 +429,10 @@ class SimpleInstance(object):
     def shard_id(self):
         return self.db_info.shard_id

+    @property
+    def region_name(self):
+        return self.db_info.region_id
+

 class DetailInstance(SimpleInstance):
     """A detailed view of an Instance.

@@ -511,7 +517,8 @@ def load_instance(cls, context, id, needs_server=False,
     else:
         try:
             server = load_server(context, db_info.id,
-                                 db_info.compute_instance_id)
+                                 db_info.compute_instance_id,
+                                 region_name=db_info.region_id)
             # TODO(tim.simpson): Remove this hack when we have notifications!
             db_info.server_status = server.status
             db_info.addresses = server.addresses

@@ -547,7 +554,7 @@ def load_guest_info(instance, context, id):
         instance.volume_used = volume_info['used']
         instance.volume_total = volume_info['total']
     except Exception as e:
-        LOG.error(e)
+        LOG.exception(e)
     return instance

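A small behavioral note on the hunk above: switching from `LOG.error(e)` to `LOG.exception(e)` inside the `except` block means the active traceback is written along with the message, which plain `error()` does not do. A minimal illustration with the standard-library logger (independent of Trove, shown only to make the difference concrete):

```python
import logging

logging.basicConfig(level=logging.ERROR)
LOG = logging.getLogger(__name__)

try:
    {}['missing']                 # raises KeyError
except Exception as e:
    LOG.error(e)                  # logs only the message: 'missing'
    LOG.exception(e)              # logs the message plus the full traceback
```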
@@ -646,8 +653,8 @@ class BaseInstance(SimpleInstance):
             self.set_instance_fault_deleted()
         # Delete associated security group
         if CONF.trove_security_groups_support:
-            SecurityGroup.delete_for_instance(self.db_info.id,
-                                              self.context)
+            SecurityGroup.delete_for_instance(self.db_info.id, self.context,
+                                              self.db_info.region_id)

     @property
     def guest(self):

@@ -658,7 +665,8 @@ class BaseInstance(SimpleInstance):
     @property
     def nova_client(self):
         if not self._nova_client:
-            self._nova_client = create_nova_client(self.context)
+            self._nova_client = create_nova_client(
+                self.context, region_name=self.db_info.region_id)
         return self._nova_client

     def update_db(self, **values):

@@ -684,7 +692,8 @@ class BaseInstance(SimpleInstance):
     @property
     def volume_client(self):
         if not self._volume_client:
-            self._volume_client = create_cinder_client(self.context)
+            self._volume_client = create_cinder_client(
+                self.context, region_name=self.db_info.region_id)
         return self._volume_client

     def reset_task_status(self):
@@ -773,13 +782,61 @@ class Instance(BuiltInstance):
                                                     datastore_manager)
         return False

+    @classmethod
+    def _validate_remote_datastore(cls, context, region_name, flavor,
+                                   datastore, datastore_version):
+        remote_nova_client = create_nova_client(context,
+                                                region_name=region_name)
+        try:
+            remote_flavor = remote_nova_client.flavors.get(flavor.id)
+            if (flavor.ram != remote_flavor.ram or
+                    flavor.vcpus != remote_flavor.vcpus):
+                raise exception.TroveError(
+                    "Flavors differ between regions"
+                    " %(local)s and %(remote)s." %
+                    {'local': CONF.os_region_name, 'remote': region_name})
+        except nova_exceptions.NotFound:
+            raise exception.TroveError(
+                "Flavors %(flavor)s not found in region %(remote)s."
+                % {'flavor': flavor.id, 'remote': region_name})
+
+        remote_trove_client = create_trove_client(
+            context, region_name=region_name)
+        try:
+            remote_ds_ver = remote_trove_client.datastore_versions.get(
+                datastore.name, datastore_version.name)
+            if datastore_version.name != remote_ds_ver.name:
+                raise exception.TroveError(
+                    "Datastore versions differ between regions "
+                    "%(local)s and %(remote)s." %
+                    {'local': CONF.os_region_name, 'remote': region_name})
+        except exception.NotFound:
+            raise exception.TroveError(
+                "Datastore Version %(dsv)s not found in region %(remote)s."
+                % {'dsv': datastore_version.name, 'remote': region_name})
+
+        glance_client = create_glance_client(context)
+        local_image = glance_client.images.get(datastore_version.image)
+        remote_glance_client = create_glance_client(
+            context, region_name=region_name)
+        remote_image = remote_glance_client.images.get(
+            remote_ds_ver.image)
+        if local_image.checksum != remote_image.checksum:
+            raise exception.TroveError(
+                "Images for Datastore %(ds)s do not match"
+                "between regions %(local)s and %(remote)s." %
+                {'ds': datastore.name, 'local': CONF.os_region_name,
+                 'remote': region_name})
+
     @classmethod
     def create(cls, context, name, flavor_id, image_id, databases, users,
                datastore, datastore_version, volume_size, backup_id,
                availability_zone=None, nics=None,
                configuration_id=None, slave_of_id=None, cluster_config=None,
                replica_count=None, volume_type=None, modules=None,
-               locality=None):
+               locality=None, region_name=None):
+
+        region_name = region_name or CONF.os_region_name
+
         call_args = {
             'name': name,
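The new `_validate_remote_datastore()` above gates cross-region deployment on three equivalence checks: the remote flavor must match on RAM and vCPUs, the remote region must expose the same datastore version, and the guest images must have identical checksums. A simplified, self-contained sketch of that comparison (the `Spec` tuple is illustrative only, not part of the change):

```python
from collections import namedtuple

# Illustrative stand-in for the flavor/datastore/image attributes compared above.
Spec = namedtuple('Spec', ['flavor_ram', 'flavor_vcpus', 'ds_version',
                           'image_checksum'])


def regions_compatible(local, remote):
    """Mirror the intent of _validate_remote_datastore: the remote region must
    offer an equivalent flavor, the same datastore version, and an identical
    guest image (compared by checksum)."""
    return (local.flavor_ram == remote.flavor_ram and
            local.flavor_vcpus == remote.flavor_vcpus and
            local.ds_version == remote.ds_version and
            local.image_checksum == remote.image_checksum)


# Example: the remote flavor has half the RAM, so validation would fail.
local = Spec(4096, 2, '5.6', 'abc123')
remote = Spec(2048, 2, '5.6', 'abc123')
assert not regions_compatible(local, remote)
```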
@@ -788,6 +845,7 @@ class Instance(BuiltInstance):
             'datastore_version': datastore_version.name,
             'image_id': image_id,
             'availability_zone': availability_zone,
+            'region_name': region_name,
         }

         # All nova flavors are permitted for a datastore-version unless one

@@ -812,6 +870,12 @@ class Instance(BuiltInstance):
         except nova_exceptions.NotFound:
             raise exception.FlavorNotFound(uuid=flavor_id)

+        # If a different region is specified for the instance, ensure
+        # that the flavor and image are the same in both regions
+        if region_name and region_name != CONF.os_region_name:
+            cls._validate_remote_datastore(context, region_name, flavor,
+                                           datastore, datastore_version)
+
         deltas = {'instances': 1}
         volume_support = datastore_cfg.volume_support
         if volume_support:

@@ -945,10 +1009,12 @@ class Instance(BuiltInstance):
                 task_status=InstanceTasks.BUILDING,
                 configuration_id=configuration_id,
                 slave_of_id=slave_of_id, cluster_id=cluster_id,
-                shard_id=shard_id, type=instance_type)
+                shard_id=shard_id, type=instance_type,
+                region_id=region_name)
             LOG.debug("Tenant %(tenant)s created new Trove instance "
-                      "%(db)s.",
-                      {'tenant': context.tenant, 'db': db_info.id})
+                      "%(db)s in region %(region)s.",
+                      {'tenant': context.tenant, 'db': db_info.id,
+                       'region': region_name})

             instance_id = db_info.id
             cls.add_instance_modules(context, instance_id, modules)
@@ -1009,8 +1075,7 @@ class Instance(BuiltInstance):
                 context, instance_id, module.id, module.md5)

     def get_flavor(self):
-        client = create_nova_client(self.context)
-        return client.flavors.get(self.flavor_id)
+        return self.nova_client.flavors.get(self.flavor_id)

     def get_default_configuration_template(self):
         flavor = self.get_flavor()

@@ -1036,13 +1101,12 @@ class Instance(BuiltInstance):
             raise exception.BadRequest(_("The new flavor id must be different "
                                          "than the current flavor id of '%s'.")
                                        % self.flavor_id)
-        client = create_nova_client(self.context)
         try:
-            new_flavor = client.flavors.get(new_flavor_id)
+            new_flavor = self.nova_client.flavors.get(new_flavor_id)
         except nova_exceptions.NotFound:
             raise exception.FlavorNotFound(uuid=new_flavor_id)

-        old_flavor = client.flavors.get(self.flavor_id)
+        old_flavor = self.nova_client.flavors.get(self.flavor_id)
         if self.volume_support:
             if new_flavor.ephemeral != 0:
                 raise exception.LocalStorageNotSupported()
@@ -1322,8 +1386,8 @@ class Instances(object):
     @staticmethod
     def load(context, include_clustered, instance_ids=None):

-        def load_simple_instance(context, db, status, **kwargs):
-            return SimpleInstance(context, db, status)
+        def load_simple_instance(context, db_info, status, **kwargs):
+            return SimpleInstance(context, db_info, status)

         if context is None:
             raise TypeError("Argument context not defined.")

@@ -1375,7 +1439,14 @@ class Instances(object):
                 db.addresses = {}
             else:
                 try:
+                    if (not db.region_id
+                            or db.region_id == CONF.os_region_name):
                         server = find_server(db.id, db.compute_instance_id)
+                    else:
+                        nova_client = create_nova_client(
+                            context, region_name=db.region_id)
+                        server = nova_client.servers.get(
+                            db.compute_instance_id)
                     db.server_status = server.status
                     db.addresses = server.addresses
                 except exception.ComputeInstanceNotFound:

@@ -1402,13 +1473,12 @@ class Instances(object):


 class DBInstance(dbmodels.DatabaseModelBase):
-    """Defines the task being executed plus the start time."""

     _data_fields = ['name', 'created', 'compute_instance_id',
                     'task_id', 'task_description', 'task_start_time',
                     'volume_id', 'deleted', 'tenant_id',
                     'datastore_version_id', 'configuration_id', 'slave_of_id',
-                    'cluster_id', 'shard_id', 'type']
+                    'cluster_id', 'shard_id', 'type', 'region_id']

     def __init__(self, task_status, **kwargs):
         """
@@ -20,6 +20,7 @@ import webob.exc
 from trove.backup.models import Backup as backup_model
 from trove.backup import views as backup_views
 import trove.common.apischema as apischema
+from trove.common import cfg
 from trove.common import exception
 from trove.common.i18n import _
 from trove.common.i18n import _LI

@@ -37,6 +38,7 @@ from trove.module import models as module_models
 from trove.module import views as module_views


+CONF = cfg.CONF
 LOG = logging.getLogger(__name__)


@@ -302,6 +304,7 @@ class InstanceController(wsgi.Controller):
                 'Cannot specify locality when adding replicas to existing '
                 'master.')
             raise exception.BadRequest(msg=dupe_locality_msg)
+        region_name = body['instance'].get('region_name', CONF.os_region_name)

         instance = models.Instance.create(context, name, flavor_id,
                                           image_id, databases, users,

@@ -312,7 +315,8 @@ class InstanceController(wsgi.Controller):
                                           replica_count=replica_count,
                                           volume_type=volume_type,
                                           modules=modules,
-                                          locality=locality)
+                                          locality=locality,
+                                          region_name=region_name)

         view = views.InstanceDetailView(instance, req=req)
         return wsgi.Result(view.data(), 200)
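With the controller change above, the create request may carry an optional `region_name`; when it is omitted the API falls back to `CONF.os_region_name`, i.e. the originating region. A hedged sketch of what a request body could look like after this change (the field names follow the usual Trove create request and all values, including `RegionTwo`, are examples only):

```python
# Example create-instance body; "region_name" is the new, optional field.
create_body = {
    "instance": {
        "name": "remote_instance",
        "flavorRef": "1",
        "volume": {"size": 2},
        "region_name": "RegionTwo",   # deploy to a remote region
    }
}

# The controller reads it exactly as in the diff above; 'RegionOne' stands in
# for CONF.os_region_name in this sketch.
region_name = create_body['instance'].get('region_name', 'RegionOne')
assert region_name == 'RegionTwo'
```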
@@ -37,6 +37,7 @@ class InstanceView(object):
             "flavor": self._build_flavor_info(),
             "datastore": {"type": self.instance.datastore.name,
                           "version": self.instance.datastore_version.name},
+            "region": self.instance.region_name
         }
         if self.instance.volume_support:
             instance_dict['volume'] = {'size': self.instance.volume_size}
@@ -41,9 +41,9 @@ class NovaNetworkStruct(object):

 class NeutronDriver(base.NetworkDriver):

-    def __init__(self, context):
+    def __init__(self, context, region_name):
         try:
-            self.client = remote.create_neutron_client(context)
+            self.client = remote.create_neutron_client(context, region_name)
         except neutron_exceptions.NeutronClientException as e:
             raise exception.TroveError(str(e))


@@ -27,10 +27,10 @@ LOG = logging.getLogger(__name__)

 class NovaNetwork(base.NetworkDriver):

-    def __init__(self, context):
+    def __init__(self, context, region_name):
         try:
             self.client = remote.create_nova_client(
-                context)
+                context, region_name)
         except nova_exceptions.ClientException as e:
             raise exception.TroveError(str(e))

@@ -428,6 +428,7 @@ class Manager(periodic_task.PeriodicTasks):
             mgmtmodels.publish_exist_events(self.exists_transformer,
                                             self.admin_context)

+    if CONF.quota_notification_interval:
     @periodic_task.periodic_task(spacing=CONF.quota_notification_interval)
     def publish_quota_notifications(self, context):
         nova_client = remote.create_nova_client(self.admin_context)

@@ -565,7 +565,7 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
             LOG.error(msg_create)
             # Make sure we log any unexpected errors from the create
             if not isinstance(e_create, TroveError):
-                LOG.error(e_create)
+                LOG.exception(e_create)
             msg_delete = (
                 _("An error occurred while deleting a bad "
                   "replication snapshot from instance %(source)s.") %

@@ -866,7 +866,7 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):

     def _create_volume(self, volume_size, volume_type, datastore_manager):
         LOG.debug("Begin _create_volume for id: %s" % self.id)
-        volume_client = create_cinder_client(self.context)
+        volume_client = create_cinder_client(self.context, self.region_name)
         volume_desc = ("datastore volume for %s" % self.id)
         volume_ref = volume_client.volumes.create(
             volume_size, name="datastore-%s" % self.id,

@@ -1009,7 +1009,7 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):

     def _create_secgroup(self, datastore_manager):
         security_group = SecurityGroup.create_for_instance(
-            self.id, self.context)
+            self.id, self.context, self.region_name)
         tcp_ports = CONF.get(datastore_manager).tcp_ports
         udp_ports = CONF.get(datastore_manager).udp_ports
         icmp = CONF.get(datastore_manager).icmp

@@ -1037,7 +1037,7 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
         if protocol == 'icmp':
             SecurityGroupRule.create_sec_group_rule(
                 s_group, 'icmp', None, None,
-                cidr, self.context)
+                cidr, self.context, self.region_name)
         else:
             for port_or_range in set(ports):
                 try:

@@ -1045,7 +1045,7 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
                     from_, to_ = utils.gen_ports(port_or_range)
                     SecurityGroupRule.create_sec_group_rule(
                         s_group, protocol, int(from_), int(to_),
-                        cidr, self.context)
+                        cidr, self.context, self.region_name)
                 except (ValueError, TroveError):
                     set_error_and_raise([from_, to_])

@@ -1143,7 +1143,8 @@ class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin):
         # If volume has been resized it must be manually removed in cinder
         try:
             if self.volume_id:
-                volume_client = create_cinder_client(self.context)
+                volume_client = create_cinder_client(self.context,
+                                                     self.region_name)
                 volume = volume_client.volumes.get(self.volume_id)
                 if volume.status == "available":
                     LOG.info(_("Deleting volume %(v)s for instance: %(i)s.")
@@ -696,7 +696,8 @@ class CreateInstance(object):

         # Check these attrs only are returned in create response
         allowed_attrs = ['created', 'flavor', 'addresses', 'id', 'links',
-                         'name', 'status', 'updated', 'datastore', 'fault']
+                         'name', 'status', 'updated', 'datastore', 'fault',
+                         'region']
         if ROOT_ON_CREATE:
             allowed_attrs.append('password')
         if VOLUME_SUPPORT:

@@ -1138,7 +1139,8 @@ class TestInstanceListing(object):
     @test
     def test_index_list(self):
         allowed_attrs = ['id', 'links', 'name', 'status', 'flavor',
-                         'datastore', 'ip', 'hostname', 'replica_of']
+                         'datastore', 'ip', 'hostname', 'replica_of',
+                         'region']
         if VOLUME_SUPPORT:
             allowed_attrs.append('volume')
         instances = dbaas.instances.list()

@@ -1159,7 +1161,7 @@ class TestInstanceListing(object):
     def test_get_instance(self):
         allowed_attrs = ['created', 'databases', 'flavor', 'hostname', 'id',
                          'links', 'name', 'status', 'updated', 'ip',
-                         'datastore', 'fault']
+                         'datastore', 'fault', 'region']
         if VOLUME_SUPPORT:
             allowed_attrs.append('volume')
         else:

@@ -1247,7 +1249,7 @@ class TestInstanceListing(object):
                          'flavor', 'guest_status', 'host', 'hostname', 'id',
                          'name', 'root_enabled_at', 'root_enabled_by',
                          'server_state_description', 'status', 'datastore',
-                         'updated', 'users', 'volume', 'fault']
+                         'updated', 'users', 'volume', 'fault', 'region']
         with CheckInstance(result._info) as check:
             check.contains_allowed_attrs(
                 result._info, allowed_attrs,

@@ -232,7 +232,9 @@ class MgmtInstancesIndex(object):
             'task_description',
             'tenant_id',
             'updated',
+            'region'
         ]

         if CONFIG.trove_volume_support:
             expected_fields.append('volume')


@@ -254,6 +256,7 @@ class MgmtInstancesIndex(object):
         Make sure that the deleted= filter works as expected, and no instances
         are excluded.
         """
+
         if not hasattr(self.client.management.index, 'deleted'):
             raise SkipTest("instance index must have a deleted "
                            "label for this test")
@@ -361,6 +361,9 @@ class FakeGuest(object):
     def backup_required_for_replication(self):
         return True

+    def post_processing_required_for_replication(self):
+        return False
+
     def module_list(self, context, include_contents=False):
         return []


@@ -870,13 +870,13 @@ def get_client_data(context):
     return CLIENT_DATA[context]


-def fake_create_nova_client(context):
+def fake_create_nova_client(context, region_name=None):
     return get_client_data(context)['nova']


-def fake_create_nova_volume_client(context):
+def fake_create_nova_volume_client(context, region_name=None):
     return get_client_data(context)['volume']


-def fake_create_cinder_client(context):
+def fake_create_cinder_client(context, region_name=None):
     return get_client_data(context)['volume']
@@ -177,6 +177,7 @@ class TestClusterController(TestCase):
                     'flavor_id': '1234',
                     'availability_zone': 'az',
                     'modules': None,
+                    'region_name': None,
                     'nics': [
                         {'net-id': 'e89aa5fd-6b0a-436d-a75c-1545d34d5331'}
                     ]

@@ -142,6 +142,7 @@ class TestClusterController(trove_testtools.TestCase):
                     'flavor_id': '1234',
                     'availability_zone': 'az',
                     'modules': None,
+                    'region_name': None,
                     'nics': [
                         {'net-id': 'e89aa5fd-6b0a-436d-a75c-1545d34d5331'}
                     ]

@@ -157,6 +157,7 @@ class TestClusterController(trove_testtools.TestCase):
                     "flavor_id": "1234",
                     "availability_zone": "az",
                     'modules': None,
+                    'region_name': None,
                     "nics": [
                         {"net-id": "e89aa5fd-6b0a-436d-a75c-1545d34d5331"}
                     ]

@@ -167,6 +168,7 @@ class TestClusterController(trove_testtools.TestCase):
                     "flavor_id": "1234",
                     "availability_zone": "az",
                     'modules': None,
+                    'region_name': None,
                     "nics": [
                         {"net-id": "e89aa5fd-6b0a-436d-a75c-1545d34d5331"}
                     ]

@@ -177,6 +179,7 @@ class TestClusterController(trove_testtools.TestCase):
                     "flavor_id": "1234",
                     "availability_zone": "az",
                     'modules': None,
+                    'region_name': None,
                     "nics": [
                         {"net-id": "e89aa5fd-6b0a-436d-a75c-1545d34d5331"}
                     ]

@@ -142,6 +142,7 @@ class TestClusterController(trove_testtools.TestCase):
                     'flavor_id': '1234',
                     'availability_zone': 'az',
                     'modules': None,
+                    'region_name': None,
                     'nics': [
                         {'net-id': 'e89aa5fd-6b0a-436d-a75c-1545d34d5331'}
                     ]
@@ -38,11 +38,15 @@ class TestModels(trove_testtools.TestCase):
             mock_flv.ephemeral = 0

             test_instances = [{'flavor_id': 1, 'volume_size': 10},
-                              {'flavor_id': 1, 'volume_size': 1.5},
-                              {'flavor_id': 2, 'volume_size': 3}]
+                              {'flavor_id': 1, 'volume_size': 1.5,
+                               'region_name': 'home'},
+                              {'flavor_id': 2, 'volume_size': 3,
+                               'region_name': 'work'}]
             models.validate_instance_flavors(Mock(), test_instances,
                                              True, True)
-            create_nove_cli_mock.assert_called_once_with(ANY)
+            create_nove_cli_mock.assert_has_calls([call(ANY, None),
+                                                   call(ANY, 'home'),
+                                                   call(ANY, 'work')])

             self.assertRaises(exception.LocalStorageNotSpecified,
                               models.validate_instance_flavors,
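The updated unit test above now expects `validate_instance_flavors` to construct one nova client per distinct region referenced by the instances (`None` for the local region plus `'home'` and `'work'`). A simplified sketch of that per-region grouping, assuming nothing beyond what the test asserts:

```python
from collections import defaultdict


def group_by_region(instances):
    """Group instance specs by their optional 'region_name' so flavor
    validation can reuse a single client per region."""
    per_region = defaultdict(list)
    for spec in instances:
        per_region[spec.get('region_name')].append(spec)
    return dict(per_region)


test_instances = [{'flavor_id': 1, 'volume_size': 10},
                  {'flavor_id': 1, 'volume_size': 1.5, 'region_name': 'home'},
                  {'flavor_id': 2, 'volume_size': 3, 'region_name': 'work'}]

# Three regions are seen, so three client creations are expected.
assert set(group_by_region(test_instances)) == {None, 'home', 'work'}
```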
@@ -24,6 +24,7 @@ from testtools import ExpectedException, matchers
 from trove.common import cfg
 from trove.common.context import TroveContext
 from trove.common import exception
+from trove.common import glance_remote
 from trove.common import remote
 from trove.tests.fakes.swift import SwiftClientStub
 from trove.tests.unittests import trove_testtools

@@ -574,6 +575,47 @@ class TestCreateSwiftClient(trove_testtools.TestCase):
                          client.url)


+class TestCreateGlanceClient(trove_testtools.TestCase):
+    def setUp(self):
+        super(TestCreateGlanceClient, self).setUp()
+        self.glance_public_url = 'http://publicURL/v2'
+        self.glancev3_public_url_region_two = 'http://publicURL-r2/v3'
+        self.service_catalog = [
+            {
+                'endpoints': [
+                    {
+                        'region': 'RegionOne',
+                        'publicURL': self.glance_public_url,
+                    }
+                ],
+                'type': 'image'
+            },
+            {
+                'endpoints': [
+                    {
+                        'region': 'RegionOne',
+                        'publicURL': 'http://publicURL-r1/v1',
+                    },
+                    {
+                        'region': 'RegionTwo',
+                        'publicURL': self.glancev3_public_url_region_two,
+                    }
+                ],
+                'type': 'imagev3'
+            }
+        ]
+
+    def test_create_with_no_conf_no_catalog(self):
+        self.assertRaises(exception.EmptyCatalog,
+                          glance_remote.create_glance_client,
+                          TroveContext())
+
+    def test_create(self):
+        client = glance_remote.create_glance_client(
+            TroveContext(service_catalog=self.service_catalog))
+        self.assertIsNotNone(client)
+
+
 class TestEndpoints(trove_testtools.TestCase):
     """
     Copied from glance/tests/unit/test_auth.py.
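The new `TestCreateGlanceClient` above builds a Keystone-style service catalog with per-region endpoints; a region-aware client factory essentially filters that catalog by service type and region name before instantiating the client. A simplified sketch of that lookup (this is not the actual `trove.common.remote` implementation, just the shape of the selection, with an example catalog):

```python
def pick_endpoint(service_catalog, service_type, region):
    """Return the publicURL of the first endpoint matching the requested
    service type and region, using the catalog layout from the test above."""
    for service in service_catalog:
        if service.get('type') != service_type:
            continue
        for endpoint in service.get('endpoints', []):
            if endpoint.get('region') == region:
                return endpoint['publicURL']
    return None


catalog = [{
    'type': 'image',
    'endpoints': [
        {'region': 'RegionOne', 'publicURL': 'http://publicURL/v2'},
        {'region': 'RegionTwo', 'publicURL': 'http://publicURL-r2/v3'},
    ],
}]
assert pick_endpoint(catalog, 'image', 'RegionTwo') == 'http://publicURL-r2/v3'
```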
@@ -19,6 +19,7 @@ from mock import Mock, patch
 from neutronclient.common import exceptions as neutron_exceptions
 from neutronclient.v2_0 import client as NeutronClient

+from trove.common import cfg
 from trove.common import exception
 from trove.common.models import NetworkRemoteModelBase
 from trove.common import remote

@@ -28,6 +29,9 @@ from trove.network.neutron import NeutronDriver as driver
 from trove.tests.unittests import trove_testtools


+CONF = cfg.CONF
+
+
 class NeutronDriverTest(trove_testtools.TestCase):
     def setUp(self):
         super(NeutronDriverTest, self).setUp()

@@ -50,26 +54,30 @@ class NeutronDriverTest(trove_testtools.TestCase):
     def test_create_security_group(self):
         driver.create_security_group = Mock()
         RemoteSecurityGroup.create(name=Mock(), description=Mock(),
-                                   context=self.context)
+                                   context=self.context,
+                                   region_name=CONF.os_region_name)
         self.assertEqual(1, driver.create_security_group.call_count)

     def test_add_security_group_rule(self):
         driver.add_security_group_rule = Mock()
         RemoteSecurityGroup.add_rule(sec_group_id=Mock(), protocol=Mock(),
                                      from_port=Mock(), to_port=Mock(),
-                                     cidr=Mock(), context=self.context)
+                                     cidr=Mock(), context=self.context,
+                                     region_name=CONF.os_region_name)
         self.assertEqual(1, driver.add_security_group_rule.call_count)

     def test_delete_security_group_rule(self):
         driver.delete_security_group_rule = Mock()
         RemoteSecurityGroup.delete_rule(sec_group_rule_id=Mock(),
-                                        context=self.context)
+                                        context=self.context,
+                                        region_name=CONF.os_region_name)
         self.assertEqual(1, driver.delete_security_group_rule.call_count)

     def test_delete_security_group(self):
         driver.delete_security_group = Mock()
         RemoteSecurityGroup.delete(sec_group_id=Mock(),
-                                   context=self.context)
+                                   context=self.context,
+                                   region_name=CONF.os_region_name)
         self.assertEqual(1, driver.delete_security_group.call_count)


@@ -81,7 +89,7 @@ class NeutronDriverExceptionTest(trove_testtools.TestCase):
         self.orig_NeutronClient = NeutronClient.Client
         self.orig_get_endpoint = remote.get_endpoint
         remote.get_endpoint = MagicMock(return_value="neutron_url")
-        mock_driver = neutron.NeutronDriver(self.context)
+        mock_driver = neutron.NeutronDriver(self.context, "regionOne")
         NetworkRemoteModelBase.get_driver = MagicMock(
             return_value=mock_driver)

@@ -98,23 +106,27 @@ class NeutronDriverExceptionTest(trove_testtools.TestCase):
     def test_create_sg_with_exception(self, mock_logging):
         self.assertRaises(exception.SecurityGroupCreationError,
                           RemoteSecurityGroup.create,
-                          "sg_name", "sg_desc", self.context)
+                          "sg_name", "sg_desc", self.context,
+                          region_name=CONF.os_region_name)

     @patch('trove.network.neutron.LOG')
     def test_add_sg_rule_with_exception(self, mock_logging):
         self.assertRaises(exception.SecurityGroupRuleCreationError,
                           RemoteSecurityGroup.add_rule,
                           "12234", "tcp", "22", "22",
-                          "0.0.0.0/8", self.context)
+                          "0.0.0.0/8", self.context,
+                          region_name=CONF.os_region_name)

     @patch('trove.network.neutron.LOG')
     def test_delete_sg_rule_with_exception(self, mock_logging):
         self.assertRaises(exception.SecurityGroupRuleDeletionError,
                           RemoteSecurityGroup.delete_rule,
-                          "12234", self.context)
+                          "12234", self.context,
+                          region_name=CONF.os_region_name)

     @patch('trove.network.neutron.LOG')
     def test_delete_sg_with_exception(self, mock_logging):
         self.assertRaises(exception.SecurityGroupDeletionError,
                           RemoteSecurityGroup.delete,
-                          "123445", self.context)
+                          "123445", self.context,
+                          region_name=CONF.os_region_name)
@@ -18,6 +18,7 @@ from mock import Mock
 from mock import patch
 from novaclient import exceptions as nova_exceptions

+from trove.common import cfg
 from trove.common import exception
 import trove.common.remote
 from trove.extensions.security_group import models as sec_mod

@@ -26,6 +27,9 @@ from trove.tests.fakes import nova
 from trove.tests.unittests import trove_testtools


+CONF = cfg.CONF
+
+
 """
 Unit tests for testing the exceptions raised by Security Groups
 """

@@ -49,7 +53,7 @@ class Security_Group_Exceptions_Test(trove_testtools.TestCase):
         self.FakeClient.security_group_rules.delete = fException

         trove.common.remote.create_nova_client = (
-            lambda c: self._return_mocked_nova_client(c))
+            lambda c, r: self._return_mocked_nova_client(c))

     def tearDown(self):
         super(Security_Group_Exceptions_Test, self).tearDown()

@@ -67,25 +71,29 @@ class Security_Group_Exceptions_Test(trove_testtools.TestCase):
                           sec_mod.RemoteSecurityGroup.create,
                           "TestName",
                           "TestDescription",
-                          self.context)
+                          self.context,
+                          region_name=CONF.os_region_name)

     @patch('trove.network.nova.LOG')
     def test_failed_to_delete_security_group(self, mock_logging):
         self.assertRaises(exception.SecurityGroupDeletionError,
                           sec_mod.RemoteSecurityGroup.delete,
-                          1, self.context)
+                          1, self.context,
+                          region_name=CONF.os_region_name)

     @patch('trove.network.nova.LOG')
     def test_failed_to_create_security_group_rule(self, mock_logging):
         self.assertRaises(exception.SecurityGroupRuleCreationError,
                           sec_mod.RemoteSecurityGroup.add_rule,
-                          1, "tcp", 3306, 3306, "0.0.0.0/0", self.context)
+                          1, "tcp", 3306, 3306, "0.0.0.0/0", self.context,
+                          region_name=CONF.os_region_name)

     @patch('trove.network.nova.LOG')
     def test_failed_to_delete_security_group_rule(self, mock_logging):
         self.assertRaises(exception.SecurityGroupRuleDeletionError,
                           sec_mod.RemoteSecurityGroup.delete_rule,
-                          1, self.context)
+                          1, self.context,
+                          region_name=CONF.os_region_name)


 class fake_RemoteSecGr(object):

@@ -93,7 +101,7 @@ class fake_RemoteSecGr(object):
         self.id = uuid.uuid4()
         return {'id': self.id}

-    def delete(self, context):
+    def delete(self, context, region_name):
         pass


@@ -135,7 +143,7 @@ class SecurityGroupDeleteTest(trove_testtools.TestCase):
         sec_mod.SecurityGroupInstanceAssociation.find_by = self.fException
         self.assertIsNone(
             sec_mod.SecurityGroup.delete_for_instance(
-                uuid.uuid4(), self.context))
+                uuid.uuid4(), self.context, CONF.os_region_name))

     def test_get_security_group_from_assoc_with_db_exception(self):

@@ -156,7 +164,7 @@ class SecurityGroupDeleteTest(trove_testtools.TestCase):
             return_value=new_fake_RemoteSecGrAssoc())
         self.assertIsNone(
             sec_mod.SecurityGroup.delete_for_instance(
-                i_id, self.context))
+                i_id, self.context, CONF.os_region_name))

     def test_delete_secgr_assoc_with_db_exception(self):

@@ -171,4 +179,4 @@ class SecurityGroupDeleteTest(trove_testtools.TestCase):
                 get_security_group(), 'delete'))
         self.assertIsNone(
             sec_mod.SecurityGroup.delete_for_instance(
-                i_id, self.context))
+                i_id, self.context, CONF.os_region_name))