Merge "Remove Trove's support for Heat"
commit 3f159fe844
@@ -78,17 +78,6 @@ common_opts = [
               help='Service type to use when searching catalog.'),
    cfg.StrOpt('cinder_endpoint_type', default='publicURL',
               help='Service endpoint type to use when searching catalog.'),
    cfg.URIOpt('heat_url', deprecated_for_removal=True,
               deprecated_reason=HEAT_REMOVAL_DEPRECATION_WARNING,
               help='URL without the tenant segment.'),
    cfg.StrOpt('heat_service_type', default='orchestration',
               deprecated_for_removal=True,
               deprecated_reason=HEAT_REMOVAL_DEPRECATION_WARNING,
               help='Service type to use when searching catalog.'),
    cfg.StrOpt('heat_endpoint_type', default='publicURL',
               deprecated_for_removal=True,
               deprecated_reason=HEAT_REMOVAL_DEPRECATION_WARNING,
               help='Service endpoint type to use when searching catalog.'),
    cfg.URIOpt('swift_url', help='URL ending in ``AUTH_``.'),
    cfg.StrOpt('swift_service_type', default='object-store',
               help='Service type to use when searching catalog.'),
@@ -221,9 +210,6 @@ common_opts = [
    cfg.BoolOpt('use_nova_server_volume', default=False,
                help='Whether to provision a Cinder volume for the '
                     'Nova instance.'),
    cfg.BoolOpt('use_heat', default=False, deprecated_for_removal=True,
                deprecated_reason=HEAT_REMOVAL_DEPRECATION_WARNING,
                help='Use Heat for provisioning.'),
    cfg.StrOpt('device_path', default='/dev/vdb',
               help='Device path for volume if volume support is enabled.'),
    cfg.StrOpt('default_datastore', default=None,
@@ -239,10 +225,6 @@ common_opts = [
               help='Maximum time (in seconds) to wait for a server delete.'),
    cfg.IntOpt('volume_time_out', default=60,
               help='Maximum time (in seconds) to wait for a volume attach.'),
    cfg.IntOpt('heat_time_out', default=60, deprecated_for_removal=True,
               deprecated_reason=HEAT_REMOVAL_DEPRECATION_WARNING,
               help='Maximum time (in seconds) to wait for a Heat request to '
                    'complete.'),
    cfg.IntOpt('reboot_time_out', default=60 * 2,
               help='Maximum time (in seconds) to wait for a server reboot.'),
    cfg.IntOpt('dns_time_out', default=60 * 2,
@@ -333,10 +315,6 @@ common_opts = [
    cfg.StrOpt('remote_cinder_client',
               default='trove.common.remote.cinder_client',
               help='Client to send Cinder calls to.'),
    cfg.StrOpt('remote_heat_client', deprecated_for_removal=True,
               deprecated_reason=HEAT_REMOVAL_DEPRECATION_WARNING,
               default='trove.common.remote.heat_client',
               help='Client to send Heat calls to.'),
    cfg.StrOpt('remote_swift_client',
               default='trove.common.remote.swift_client',
               help='Client to send Swift calls to.'),
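For context on the options deleted above: before this change they were only flagged for removal through oslo.config's deprecation support. A minimal, self-contained sketch of how such a flag behaves, using an illustrative message and a standalone ConfigOpts instance rather than Trove's CONF and HEAT_REMOVAL_DEPRECATION_WARNING:

from oslo_config import cfg

# Illustrative stand-in for Trove's HEAT_REMOVAL_DEPRECATION_WARNING.
_DEPRECATION_MSG = ('Support for Heat based provisioning is deprecated '
                    'and scheduled for removal.')

_opts = [
    cfg.URIOpt('heat_url',
               deprecated_for_removal=True,
               deprecated_reason=_DEPRECATION_MSG,
               help='URL without the tenant segment.'),
]

CONF = cfg.ConfigOpts()
CONF.register_opts(_opts)

# A deprecated option still resolves normally; oslo.config only emits a
# deprecation warning when an operator actually sets it. After this commit
# Trove simply no longer registers or reads the option.
print(CONF.heat_url)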
@@ -20,7 +20,6 @@ from trove.common import exception
from trove.common.strategies.cluster import strategy

from cinderclient.v2 import client as CinderClient
from heatclient.v1 import client as HeatClient
from keystoneclient.service_catalog import ServiceCatalog
from novaclient.client import Client
from swiftclient.client import Connection
@@ -135,23 +134,6 @@ def cinder_client(context, region_name=None):
    return client


def heat_client(context, region_name=None):
    if CONF.heat_url:
        url = '%(heat_url)s%(tenant)s' % {
            'heat_url': normalize_url(CONF.heat_url),
            'tenant': context.tenant}
    else:
        url = get_endpoint(context.service_catalog,
                           service_type=CONF.heat_service_type,
                           endpoint_region=region_name or CONF.os_region_name,
                           endpoint_type=CONF.heat_endpoint_type)

    client = HeatClient.Client(token=context.auth_token,
                               os_no_client_auth=True,
                               endpoint=url)
    return client


def swift_client(context, region_name=None):
    if CONF.swift_url:
        # swift_url has a different format so doesn't need to be normalized
@@ -191,5 +173,4 @@ create_guest_client = import_class(CONF.remote_guest_client)
create_nova_client = import_class(CONF.remote_nova_client)
create_swift_client = import_class(CONF.remote_swift_client)
create_cinder_client = import_class(CONF.remote_cinder_client)
create_heat_client = import_class(CONF.remote_heat_client)
create_neutron_client = import_class(CONF.remote_neutron_client)
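The deleted heat_client() factory above either honoured the heat_url override or fell back to a Keystone service-catalog lookup via get_endpoint(). A rough, self-contained sketch of that catalog lookup, using a plain list-of-dicts catalog of the same shape as the one in the tests further down; the function name and the example URL are illustrative, not Trove's actual get_endpoint implementation:

def lookup_endpoint(service_catalog, service_type, endpoint_region,
                    endpoint_type='publicURL'):
    # Walk the catalog, match on service type, then pick the endpoint
    # registered for the requested region.
    for service in service_catalog:
        if service.get('type') != service_type:
            continue
        for endpoint in service.get('endpoints', []):
            if endpoint.get('region') == endpoint_region:
                return endpoint[endpoint_type]
    raise LookupError('No %s endpoint found in region %s'
                      % (service_type, endpoint_region))


catalog = [{'type': 'orchestration',
            'endpoints': [{'region': 'RegionOne',
                           'publicURL': 'http://heat.example.com:8004/v1'}]}]
print(lookup_endpoint(catalog, 'orchestration', 'RegionOne'))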
@@ -14,14 +14,12 @@
# License for the specific language governing permissions and limitations
# under the License.

import jinja2
from oslo_config import cfg as oslo_config
from oslo_log import log as logging

from trove.common import cfg
from trove.common import configurations
from trove.common import exception
from trove.common.i18n import _
from trove.common import utils

CONF = cfg.CONF
@@ -126,20 +124,6 @@ def _validate_datastore(datastore_manager):
            datastore_manager=datastore_manager)


def load_heat_template(datastore_manager):
    patterns = ["%s/heat.template" % datastore_manager,
                "default.heat.template"]
    _validate_datastore(datastore_manager)
    try:
        template_obj = ENV.select_template(patterns)
        return template_obj
    except jinja2.TemplateNotFound:
        msg = _("Missing heat template for %(s_datastore_manager)s.") % (
            {"s_datastore_manager": datastore_manager})
        LOG.error(msg)
        raise exception.TroveError(msg)


class ReplicaSourceConfigTemplate(SingleInstanceConfigTemplate):
    template_name = "replica_source.config.template"
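load_heat_template(), deleted above, relied on Jinja2's select_template() to fall back from a per-datastore template to default.heat.template. A self-contained sketch of that fallback, using an in-memory DictLoader instead of Trove's filesystem-backed ENV:

import jinja2

# In-memory stand-in for Trove's template environment (utils.ENV).
env = jinja2.Environment(loader=jinja2.DictLoader({
    'default.heat.template': 'default template for {{ datastore_manager }}',
}))

patterns = ['mysql/heat.template', 'default.heat.template']
try:
    # select_template() returns the first name that resolves; the
    # datastore-specific template is missing here, so the default wins.
    template_obj = env.select_template(patterns)
    print(template_obj.render(datastore_manager='mysql'))
except jinja2.TemplateNotFound:
    raise RuntimeError('Missing heat template for mysql.')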
@@ -19,7 +19,6 @@ import traceback
from cinderclient import exceptions as cinder_exceptions
from eventlet import greenthread
from eventlet.timeout import Timeout
from heatclient import exc as heat_exceptions
from novaclient import exceptions as nova_exceptions
from oslo_log import log as logging
from oslo_utils import timeutils
@@ -59,15 +58,12 @@ import trove.common.remote as remote
from trove.common.remote import create_cinder_client
from trove.common.remote import create_dns_client
from trove.common.remote import create_guest_client
from trove.common.remote import create_heat_client
from trove.common import server_group as srv_grp
from trove.common.strategies.cluster import strategy
from trove.common import template
from trove.common import utils
from trove.common.utils import try_recover
from trove.extensions.mysql import models as mysql_models
from trove.extensions.security_group.models import (
    SecurityGroupInstanceAssociation)
from trove.extensions.security_group.models import SecurityGroup
from trove.extensions.security_group.models import SecurityGroupRule
from trove.instance import models as inst_models
@@ -89,13 +85,9 @@ VOLUME_TIME_OUT = CONF.volume_time_out  # seconds.
DNS_TIME_OUT = CONF.dns_time_out  # seconds.
RESIZE_TIME_OUT = CONF.resize_time_out  # seconds.
REVERT_TIME_OUT = CONF.revert_time_out  # seconds.
HEAT_TIME_OUT = CONF.heat_time_out  # seconds.
USAGE_SLEEP_TIME = CONF.usage_sleep_time  # seconds.
HEAT_STACK_SUCCESSFUL_STATUSES = [('CREATE', 'CREATE_COMPLETE')]
HEAT_RESOURCE_SUCCESSFUL_STATE = 'CREATE_COMPLETE'

use_nova_server_volume = CONF.use_nova_server_volume
use_heat = CONF.use_heat


class NotifyMixin(object):
@@ -472,12 +464,7 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
        LOG.info(_("Creating instance %s.") % self.id)
        security_groups = None

        # If security group support is enabled and heat based instance
        # orchestration is disabled, create a security group.
        #
        # Heat based orchestration handles security group(resource)
        # in the template definition.
        if CONF.trove_security_groups_support and not use_heat:
        if CONF.trove_security_groups_support:
            try:
                security_groups = self._create_secgroup(datastore_manager)
            except Exception as e:
@@ -491,21 +478,7 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):

        files = self.get_injected_files(datastore_manager)
        cinder_volume_type = volume_type or CONF.cinder_volume_type
        if use_heat:
            msg = _("Support for heat templates in Trove is scheduled for "
                    "removal. You will no longer be able to provide a heat "
                    "template to Trove for the provisioning of resources.")
            LOG.warning(msg)
            volume_info = self._create_server_volume_heat(
                flavor,
                image_id,
                datastore_manager,
                volume_size,
                availability_zone,
                nics,
                files,
                cinder_volume_type)
        elif use_nova_server_volume:
        if use_nova_server_volume:
            volume_info = self._create_server_volume(
                flavor['id'],
                image_id,
@@ -794,104 +767,6 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
            'to_': str(to_)})
        return final

    def _create_server_volume_heat(self, flavor, image_id,
                                   datastore_manager, volume_size,
                                   availability_zone, nics, files,
                                   volume_type):
        LOG.debug("Begin _create_server_volume_heat for id: %s" % self.id)
        try:
            client = create_heat_client(self.context)
            tcp_rules_mapping_list = self._build_sg_rules_mapping(CONF.get(
                datastore_manager).tcp_ports)
            udp_ports_mapping_list = self._build_sg_rules_mapping(CONF.get(
                datastore_manager).udp_ports)

            ifaces, ports = self._build_heat_nics(nics)
            template_obj = template.load_heat_template(datastore_manager)
            heat_template_unicode = template_obj.render(
                volume_support=self.volume_support,
                ifaces=ifaces, ports=ports,
                tcp_rules=tcp_rules_mapping_list,
                udp_rules=udp_ports_mapping_list,
                datastore_manager=datastore_manager,
                files=files)
            try:
                heat_template = heat_template_unicode.encode('utf-8')
            except UnicodeEncodeError:
                raise TroveError(_("Failed to utf-8 encode Heat template."))

            parameters = {"Flavor": flavor["name"],
                          "VolumeSize": volume_size,
                          "VolumeType": volume_type,
                          "InstanceId": self.id,
                          "ImageId": image_id,
                          "DatastoreManager": datastore_manager,
                          "AvailabilityZone": availability_zone,
                          "TenantId": self.tenant_id}
            stack_name = 'trove-%s' % self.id
            client.stacks.create(stack_name=stack_name,
                                 template=heat_template,
                                 parameters=parameters)
            try:
                utils.poll_until(
                    lambda: client.stacks.get(stack_name),
                    lambda stack: stack.stack_status in ['CREATE_COMPLETE',
                                                         'CREATE_FAILED'],
                    sleep_time=USAGE_SLEEP_TIME,
                    time_out=HEAT_TIME_OUT)
            except PollTimeOut:
                raise TroveError(_("Failed to obtain Heat stack status. "
                                   "Timeout occurred."))

            stack = client.stacks.get(stack_name)
            if ((stack.action, stack.stack_status)
                    not in HEAT_STACK_SUCCESSFUL_STATUSES):
                raise TroveError(_("Failed to create Heat stack."))

            resource = client.resources.get(stack.id, 'BaseInstance')
            if resource.resource_status != HEAT_RESOURCE_SUCCESSFUL_STATE:
                raise TroveError(_("Failed to provision Heat base instance."))
            instance_id = resource.physical_resource_id

            if self.volume_support:
                resource = client.resources.get(stack.id, 'DataVolume')
                if resource.resource_status != HEAT_RESOURCE_SUCCESSFUL_STATE:
                    raise TroveError(_("Failed to provision Heat data "
                                       "volume."))
                volume_id = resource.physical_resource_id
                self.update_db(compute_instance_id=instance_id,
                               volume_id=volume_id)
            else:
                self.update_db(compute_instance_id=instance_id)

            if CONF.trove_security_groups_support:
                resource = client.resources.get(stack.id, 'DatastoreSG')
                name = "%s_%s" % (
                    CONF.trove_security_group_name_prefix, self.id)
                description = _("Security Group for %s") % self.id
                SecurityGroup.create(
                    id=resource.physical_resource_id,
                    name=name, description=description,
                    user=self.context.user,
                    tenant_id=self.context.tenant)
                SecurityGroupInstanceAssociation.create(
                    security_group_id=resource.physical_resource_id,
                    instance_id=self.id)

        except (TroveError, heat_exceptions.HTTPNotFound,
                heat_exceptions.HTTPException) as e:
            msg = _("Error occurred during Heat stack creation for "
                    "instance %s.") % self.id
            err = inst_models.InstanceTasks.BUILDING_ERROR_SERVER
            self._log_and_raise(e, msg, err)

        device_path = self.device_path
        mount_point = CONF.get(datastore_manager).mount_point
        volume_info = {'device_path': device_path, 'mount_point': mount_point}

        LOG.debug("End _create_server_volume_heat for id: %s" % self.id)
        return volume_info

    def _create_server_volume_individually(self, flavor_id, image_id,
                                           security_groups, datastore_manager,
                                           volume_size, availability_zone,
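The method removed above created the stack and then blocked in utils.poll_until() until Heat reported a terminal status, raising on timeout. A generic sketch of that wait loop, with a simple hand-rolled poller standing in for Trove's utils.poll_until and PollTimeOut:

import time


class PollTimeOut(Exception):
    """Stand-in for Trove's PollTimeOut exception."""


def poll_until(retriever, condition, sleep_time=2, time_out=60):
    # Repeatedly fetch the object until the condition holds or we time out.
    deadline = time.time() + time_out
    while time.time() < deadline:
        obj = retriever()
        if condition(obj):
            return obj
        time.sleep(sleep_time)
    raise PollTimeOut('Timed out waiting for a terminal stack status.')


# Hypothetical usage against a Heat client, mirroring the deleted code:
#   poll_until(lambda: client.stacks.get(stack_name),
#              lambda s: s.stack_status in ('CREATE_COMPLETE', 'CREATE_FAILED'),
#              sleep_time=USAGE_SLEEP_TIME, time_out=HEAT_TIME_OUT)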
@@ -1146,27 +1021,6 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
        except (ValueError, TroveError):
            set_error_and_raise([from_, to_])

    def _build_heat_nics(self, nics):
        ifaces = []
        ports = []
        if nics:
            for idx, nic in enumerate(nics):
                iface_id = nic.get('port-id')
                if iface_id:
                    ifaces.append(iface_id)
                    continue
                net_id = nic.get('net-id')
                if net_id:
                    port = {}
                    port['name'] = "Port%s" % idx
                    port['net_id'] = net_id
                    fixed_ip = nic.get('v4-fixed-ip')
                    if fixed_ip:
                        port['fixed_ip'] = fixed_ip
                    ports.append(port)
                    ifaces.append("{Ref: Port%s}" % idx)
        return ifaces, ports


class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin):
    """
@@ -1191,13 +1045,7 @@ class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin):
            LOG.exception(_("Error stopping the datastore before attempting "
                            "to delete instance id %s.") % self.id)
        try:
            if use_heat:
                # Delete the server via heat
                heatclient = create_heat_client(self.context)
                name = 'trove-%s' % self.id
                heatclient.stacks.delete(name)
            else:
                self.server.delete()
            self.server.delete()
        except Exception as ex:
            LOG.exception(_("Error during delete compute server %s")
                          % self.server.id)
@@ -1,97 +0,0 @@
HeatTemplateFormatVersion: '2012-12-12'
Description: Instance creation template for {{datastore_manager}}
Parameters:
  Flavor:
    Type: String
  VolumeSize:
    Type: Number
    Default : '1'
  InstanceId:
    Type: String
  ImageId:
    Type: String
  DatastoreManager:
    Type: String
  AvailabilityZone:
    Type: String
    Default: nova
  TenantId:
    Type: String
Resources:
{% for port in ports %}
  {{ port.name }}:
    Type: OS::Neutron::Port
    Properties:
      network_id: "{{ port.net_id }}"
      security_groups: [{Ref: DatastoreSG}]
{% if port.fixed_ip %}
      fixed_ips: [{"ip_address": "{{ port.fixed_ip }}"}]
{% endif %}
{% endfor %}
  BaseInstance:
    Type: AWS::EC2::Instance
    Metadata:
      AWS::CloudFormation::Init:
        config:
          files:
{% for file, content in files.items() %}
            {{ file }}:
              content: |
                {{ content | indent(16) }}
              mode: '000644'
              owner: root
              group: root
{% endfor %}
    Properties:
      ImageId: {Ref: ImageId}
      InstanceType: {Ref: Flavor}
      AvailabilityZone: {Ref: AvailabilityZone}
      SecurityGroups : [{Ref: DatastoreSG}]
      UserData:
        Fn::Base64:
          Fn::Join:
          - ''
          - ["#!/bin/bash -v\n",
             "/opt/aws/bin/cfn-init\n",
             "sudo service trove-guest start\n"]
{% if volume_support %}
  DataVolume:
    Type: AWS::EC2::Volume
    Properties:
      Size: {Ref: VolumeSize}
      AvailabilityZone: {Ref: AvailabilityZone}
      Tags:
      - {Key: Usage, Value: Test}
  MountPoint:
    Type: AWS::EC2::VolumeAttachment
    Properties:
      InstanceId: {Ref: BaseInstance}
      VolumeId: {Ref: DataVolume}
      Device: /dev/vdb
{% endif %}
  DatastoreSG:
    Type: AWS::EC2::SecurityGroup
    Properties:
      GroupDescription: Default Security group for {{datastore_manager}}
{% if tcp_rules or udp_rules %}
      SecurityGroupIngress:
{% for rule in tcp_rules %}
        - IpProtocol: "tcp"
          FromPort: "{{rule.from_}}"
          ToPort: "{{rule.to_}}"
          CidrIp: "{{rule.cidr}}"
{% endfor %}
{% for rule in udp_rules %}
        - IpProtocol: "udp"
          FromPort: "{{rule.from_}}"
          ToPort: "{{rule.to_}}"
          CidrIp: "{{rule.cidr}}"
{% endfor %}
{% endif %}
  DatabaseIPAddress:
    Type: AWS::EC2::EIP
  DatabaseIPAssoc :
    Type: AWS::EC2::EIPAssociation
    Properties:
      InstanceId: {Ref: BaseInstance}
      EIP: {Ref: DatabaseIPAddress}
@@ -427,85 +427,6 @@ class TestCreateNovaClient(trove_testtools.TestCase):
                         admin_client.client.management_url)


class TestCreateHeatClient(trove_testtools.TestCase):
    def setUp(self):
        super(TestCreateHeatClient, self).setUp()
        self.heat_public_url = 'http://publicURL/v2'
        self.heatv3_public_url_region_two = 'http://publicURL-r2/v3'
        self.service_catalog = [
            {
                'endpoints': [
                    {
                        'region': 'RegionOne',
                        'publicURL': self.heat_public_url,
                    }
                ],
                'type': 'orchestration'
            },
            {
                'endpoints': [
                    {
                        'region': 'RegionOne',
                        'publicURL': 'http://publicURL-r1/v1',
                    },
                    {
                        'region': 'RegionTwo',
                        'publicURL': self.heatv3_public_url_region_two,
                    }
                ],
                'type': 'orchestrationv3'
            }
        ]

    def tearDown(self):
        super(TestCreateHeatClient, self).tearDown()
        cfg.CONF.clear_override('heat_url')
        cfg.CONF.clear_override('heat_service_type')
        cfg.CONF.clear_override('os_region_name')

    def test_create_with_no_conf_no_catalog(self):
        self.assertRaises(exception.EmptyCatalog,
                          remote.create_heat_client,
                          TroveContext())

    def test_create_with_conf_override(self):
        heat_url_from_conf = 'http://example.com'
        tenant_from_ctx = uuid.uuid4().hex
        cfg.CONF.set_override('heat_url', heat_url_from_conf,
                              enforce_type=True)

        client = remote.create_heat_client(
            TroveContext(tenant=tenant_from_ctx))
        self.assertEqual('%s/%s' % (heat_url_from_conf, tenant_from_ctx),
                         client.http_client.endpoint)

    def test_create_with_conf_override_trailing_slash(self):
        heat_url_from_conf = 'http://example.com/'
        tenant_from_ctx = uuid.uuid4().hex
        cfg.CONF.set_override('heat_url', heat_url_from_conf,
                              enforce_type=True)
        client = remote.create_heat_client(
            TroveContext(tenant=tenant_from_ctx))
        self.assertEqual('%s%s' % (heat_url_from_conf, tenant_from_ctx),
                         client.http_client.endpoint)

    def test_create_with_catalog_and_default_service_type(self):
        client = remote.create_heat_client(
            TroveContext(service_catalog=self.service_catalog))
        self.assertEqual(self.heat_public_url,
                         client.http_client.endpoint)

    def test_create_with_catalog_all_opts(self):
        cfg.CONF.set_override('heat_service_type', 'orchestrationv3',
                              enforce_type=True)
        cfg.CONF.set_override('os_region_name', 'RegionTwo',
                              enforce_type=True)
        client = remote.create_heat_client(
            TroveContext(service_catalog=self.service_catalog))
        self.assertEqual(self.heatv3_public_url_region_two,
                         client.http_client.endpoint)


class TestCreateSwiftClient(trove_testtools.TestCase):
    def setUp(self):
        super(TestCreateSwiftClient, self).setUp()
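The deleted test class above drove remote.create_heat_client() through configuration overrides, restoring them in tearDown(). A minimal sketch of that override/cleanup pattern with a standalone, illustrative option (the enforce_type argument seen above existed in the oslo.config release used at the time; it is omitted here):

from oslo_config import cfg

CONF = cfg.ConfigOpts()
CONF.register_opts([cfg.StrOpt('service_type', default='database')])

# setUp-style override ...
CONF.set_override('service_type', 'orchestration')
assert CONF.service_type == 'orchestration'

# ... and tearDown-style cleanup, restoring the default.
CONF.clear_override('service_type')
assert CONF.service_type == 'database'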
@@ -13,9 +13,7 @@ import re

from mock import Mock

from trove.common import exception
from trove.common import template
from trove.common import utils
from trove.datastore.models import DatastoreVersion
from trove.tests.unittests import trove_testtools
from trove.tests.unittests.util import util
@@ -103,87 +101,3 @@ class TemplateTest(trove_testtools.TestCase):
            self.flavor_dict,
            self.server_id)
        self.assertTrue(self._find_in_template(config.render(), "relay_log"))


class HeatTemplateLoadTest(trove_testtools.TestCase):

    class FakeTemplate(object):
        def __init__(self):
            self.name = 'mysql/heat.template'

    def setUp(self):
        self.default = 'default.heat.template'
        self.orig_1 = utils.ENV.list_templates
        self.orig_2 = utils.ENV.get_template
        super(HeatTemplateLoadTest, self).setUp()

    def tearDown(self):
        utils.ENV.list_templates = self.orig_1
        utils.ENV.get_template = self.orig_2
        super(HeatTemplateLoadTest, self).tearDown()

    def test_heat_template_load_with_invalid_datastore(self):
        invalid_datastore = 'mysql-blah'
        self.assertRaises(exception.InvalidDatastoreManager,
                          template.load_heat_template,
                          invalid_datastore)

    def test_heat_template_load_non_default(self):
        orig = utils.ENV._load_template
        utils.ENV._load_template = Mock(return_value=self.FakeTemplate())
        mysql_tmpl = template.load_heat_template('mysql')
        self.assertNotEqual(mysql_tmpl.name, self.default)
        utils.ENV._load_template = orig

    def test_heat_template_load_success(self):
        mysql_tmpl = template.load_heat_template('mysql')
        redis_tmpl = template.load_heat_template('redis')
        cassandra_tmpl = template.load_heat_template('cassandra')
        mongo_tmpl = template.load_heat_template('mongodb')
        percona_tmpl = template.load_heat_template('percona')
        couchbase_tmpl = template.load_heat_template('couchbase')
        self.assertIsNotNone(mysql_tmpl)
        self.assertIsNotNone(redis_tmpl)
        self.assertIsNotNone(cassandra_tmpl)
        self.assertIsNotNone(mongo_tmpl)
        self.assertIsNotNone(percona_tmpl)
        self.assertIsNotNone(couchbase_tmpl)
        self.assertEqual(self.default, mysql_tmpl.name)
        self.assertEqual(self.default, redis_tmpl.name)
        self.assertEqual(self.default, cassandra_tmpl.name)
        self.assertEqual(self.default, mongo_tmpl.name)
        self.assertEqual(self.default, percona_tmpl.name)
        self.assertEqual(self.default, couchbase_tmpl.name)

    def test_render_templates_with_ports_from_config(self):
        mysql_tmpl = template.load_heat_template('mysql')
        tcp_rules = [{'cidr': "0.0.0.0/0",
                      'from_': 3306,
                      'to_': 3309},
                     {'cidr': "0.0.0.0/0",
                      'from_': 3320,
                      'to_': 33022}]
        output = mysql_tmpl.render(
            volume_support=True,
            ifaces=[], ports=[],
            tcp_rules=tcp_rules,
            udp_rules=[],
            files={})
        self.assertIsNotNone(output)
        self.assertIn('FromPort: "3306"', output)
        self.assertIn('ToPort: "3309"', output)
        self.assertIn('CidrIp: "0.0.0.0/0"', output)
        self.assertIn('FromPort: "3320"', output)
        self.assertIn('ToPort: "33022"', output)

    def test_no_rules_if_no_ports(self):
        mysql_tmpl = template.load_heat_template('mysql')
        output = mysql_tmpl.render(
            volume_support=True,
            ifaces=[], ports=[],
            tcp_rules=[],
            udp_rules=[],
            files={})
        self.assertIsNotNone(output)
        self.assertNotIn('- IpProtocol: "tcp"', output)
        self.assertNotIn('- IpProtocol: "udp"', output)