(fix troveclient gate) Use alt-demo network
This change uses the alt-demo network instead of sharing the private network from the demo tenant. This should fix the tempest tests on python-troveclient that were complaining about this, and also brings us one step closer to using neutron entirely. Tested with the demo tenant as well: it still places the instance on a network without using the --nic parameter; the only drawback is that the IP doesn't appear in the trove show command. The scenario and API tests were modified to always pass in the nic parameter (as probably should have been done from the beginning anyway). Change-Id: I9f3cbae3490e9995ba5f835fc2304442b83464e4 Closes-Bug: 1647001
This commit is contained in:
parent
b8bf53cb77
commit
d54cd03199
@ -361,6 +361,27 @@ function _create_private_subnet_v6 {
|
||||
echo $ipv6_subnet_id
|
||||
}
|
||||
|
||||
# Set up a network on the given tenant. Requires ROUTER_ID, REGION_NAME and
# IP_VERSION to be set in the environment.
# Arguments:
#   $1 - cloud user (os-cloud name to run the openstack commands as)
#   $2 - project (tenant) id that will own the network
#   $3 - network name
#   $4 - IPv4 subnet name
#   $5 - IPv6 subnet name
# Echoes the id of the newly created network so callers can capture it
# with command substitution.
function set_up_network() {
    local CLOUD_USER=$1
    local PROJECT_ID=$2
    local NET_NAME=$3
    local SUBNET_NAME=$4
    local IPV6_SUBNET_NAME=$5
    # Declare these local so the function does not leak globals into the
    # caller's scope when invoked directly (i.e. not via $(...)).
    local NEW_NET_ID NEW_SUBNET_ID NEW_IPV6_SUBNET_ID

    NEW_NET_ID=$(openstack --os-cloud "${CLOUD_USER}" --os-region "$REGION_NAME" network create --project "${PROJECT_ID}" "$NET_NAME" | grep ' id ' | get_field 2)
    if [[ "$IP_VERSION" =~ 4.* ]]; then
        NEW_SUBNET_ID=$(_create_private_subnet_v4 "${PROJECT_ID}" "${NEW_NET_ID}" "${SUBNET_NAME}" "${CLOUD_USER}")
        openstack --os-cloud "${CLOUD_USER}" --os-region "$REGION_NAME" router add subnet "$ROUTER_ID" "$NEW_SUBNET_ID"
    fi
    if [[ "$IP_VERSION" =~ .*6 ]]; then
        NEW_IPV6_SUBNET_ID=$(_create_private_subnet_v6 "${PROJECT_ID}" "${NEW_NET_ID}" "${IPV6_SUBNET_NAME}" "${CLOUD_USER}")
        openstack --os-cloud "${CLOUD_USER}" --os-region "$REGION_NAME" router add subnet "$ROUTER_ID" "$NEW_IPV6_SUBNET_ID"
    fi

    # Emit the network id; this is the function's "return value".
    echo "$NEW_NET_ID"
}
|
||||
|
||||
# finalize_trove_network() - do the last thing(s) before starting Trove
|
||||
function finalize_trove_network {
|
||||
|
||||
@ -384,19 +405,15 @@ function finalize_trove_network {
|
||||
ALT_PRIVATE_NETWORK_NAME=${TROVE_PRIVATE_NETWORK_NAME}
|
||||
ALT_PRIVATE_SUBNET_NAME=${TROVE_PRIVATE_SUBNET_NAME}
|
||||
ALT_PRIVATE_IPV6_SUBNET_NAME=ipv6-${ALT_PRIVATE_SUBNET_NAME}
|
||||
ALT_NET_ID=$(openstack --os-cloud ${ADMIN_ALT_DEMO_CLOUD} --os-region "$REGION_NAME" network create --project ${ALT_TENANT_ID} "$ALT_PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2)
|
||||
if [[ "$IP_VERSION" =~ 4.* ]]; then
|
||||
ALT_SUBNET_ID=$(_create_private_subnet_v4 ${ALT_TENANT_ID} ${ALT_NET_ID} ${ALT_PRIVATE_SUBNET_NAME} ${ADMIN_ALT_DEMO_CLOUD})
|
||||
openstack --os-cloud ${ADMIN_ALT_DEMO_CLOUD} --os-region "$REGION_NAME" router add subnet $ROUTER_ID $ALT_SUBNET_ID
|
||||
echo "Created network ${ALT_PRIVATE_NETWORK_NAME} (${ALT_NET_ID}): ${ALT_PRIVATE_SUBNET_NAME} (${ALT_SUBNET_ID})"
|
||||
else
|
||||
echo "Only IPV4 supported at present"
|
||||
fi
|
||||
#if [[ "$IP_VERSION" =~ .*6 ]]; then
|
||||
# ALT_IPV6_SUBNET_ID=$(_create_private_subnet_v6 ${ALT_TENANT_ID} ${ALT_NET_ID} ${ALT_PRIVATE_IPV6_SUBNET_NAME} ${ADMIN_ALT_DEMO_CLOUD})
|
||||
# openstack --os-cloud ${ADMIN_ALT_DEMO_CLOUD} --os-region "$REGION_NAME" router add subnet $ROUTER_ID $ALT_IPV6_SUBNET_ID
|
||||
# echo "Created IPv6 network ${ALT_PRIVATE_NETWORK_NAME} (${ALT_NET_ID}): ${ALT_PRIVATE_IPV6_SUBNET_NAME} (${ALT_IPV6_SUBNET_ID})"
|
||||
#fi
|
||||
ALT_NET_ID=$(set_up_network $ADMIN_ALT_DEMO_CLOUD $ALT_TENANT_ID $ALT_PRIVATE_NETWORK_NAME $ALT_PRIVATE_SUBNET_NAME $ALT_PRIVATE_IPV6_SUBNET_NAME)
|
||||
echo "Created network ${ALT_PRIVATE_NETWORK_NAME} (${ALT_NET_ID})"
|
||||
|
||||
# Set up a management network to test that functionality
|
||||
ALT_MGMT_NETWORK_NAME=trove-mgmt
|
||||
ALT_MGMT_SUBNET_NAME=${ALT_MGMT_NETWORK_NAME}-subnet
|
||||
ALT_MGMT_IPV6_SUBNET_NAME=ipv6-${ALT_MGMT_SUBNET_NAME}
|
||||
ALT_MGMT_ID=$(set_up_network $ADMIN_ALT_DEMO_CLOUD $ALT_TENANT_ID $ALT_MGMT_NETWORK_NAME $ALT_MGMT_SUBNET_NAME $ALT_MGMT_IPV6_SUBNET_NAME)
|
||||
echo "Created network ${ALT_MGMT_NETWORK_NAME} (${ALT_MGMT_ID})"
|
||||
|
||||
# Make sure we can reach the VMs
|
||||
local replace_range=${SUBNETPOOL_PREFIX_V4}
|
||||
@ -405,26 +422,24 @@ function finalize_trove_network {
|
||||
fi
|
||||
sudo ip route replace $replace_range via $ROUTER_GW_IP
|
||||
|
||||
# Share the private network so that the alt_demo tenant can access it too.
|
||||
# This will be used as the management network for the alt_demo tenant.
|
||||
NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network list | grep " $PRIVATE_NETWORK_NAME " | awk '{print $2}')
|
||||
$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network set --share $NET_ID)
|
||||
echo "Using network ${PRIVATE_NETWORK_NAME} (${NET_ID}) as the Management network for Trove."
|
||||
|
||||
echo "Neutron network list:"
|
||||
openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network list
|
||||
|
||||
# Now make sure the conf settings are right
|
||||
iniset $TROVE_CONF DEFAULT network_label_regex .*
|
||||
iniset $TROVE_CONF DEFAULT ip_regex .*
|
||||
iniset $TROVE_CONF DEFAULT black_list_regex ^10.0.1.*
|
||||
iniset $TROVE_CONF DEFAULT default_neutron_networks ${NET_ID}
|
||||
iniset $TROVE_CONF DEFAULT network_label_regex "${ALT_PRIVATE_NETWORK_NAME}"
|
||||
iniset $TROVE_CONF DEFAULT ip_regex ""
|
||||
iniset $TROVE_CONF DEFAULT black_list_regex ""
|
||||
# Don't use a default network for now, until the neutron issues are figured out
|
||||
#iniset $TROVE_CONF DEFAULT default_neutron_networks "${ALT_MGMT_ID}"
|
||||
iniset $TROVE_CONF DEFAULT default_neutron_networks ""
|
||||
iniset $TROVE_CONF DEFAULT network_driver trove.network.neutron.NeutronDriver
|
||||
|
||||
iniset $TROVE_TASKMANAGER_CONF DEFAULT network_label_regex .*
|
||||
iniset $TROVE_TASKMANAGER_CONF DEFAULT ip_regex .*
|
||||
iniset $TROVE_TASKMANAGER_CONF DEFAULT black_list_regex ^10.0.1.*
|
||||
iniset $TROVE_TASKMANAGER_CONF DEFAULT default_neutron_networks ${NET_ID}
|
||||
iniset $TROVE_TASKMANAGER_CONF DEFAULT network_label_regex "${ALT_PRIVATE_NETWORK_NAME}"
|
||||
iniset $TROVE_TASKMANAGER_CONF DEFAULT ip_regex ""
|
||||
iniset $TROVE_TASKMANAGER_CONF DEFAULT black_list_regex ""
|
||||
# Don't use a default network for now, until the neutron issues are figured out
|
||||
#iniset $TROVE_TASKMANAGER_CONF DEFAULT default_neutron_networks "${ALT_MGMT_ID}"
|
||||
iniset $TROVE_CONF DEFAULT default_neutron_networks ""
|
||||
iniset $TROVE_TASKMANAGER_CONF DEFAULT network_driver trove.network.neutron.NeutronDriver
|
||||
}
|
||||
|
||||
|
@ -730,12 +730,9 @@ function mod_confs() {
|
||||
local credentials="--os-username=admin --os-password=$ADMIN_PASSWORD --os-tenant-name=admin --os-auth-url=$TROVE_AUTH_ENDPOINT --os-region $REGION_NAME"
|
||||
TROVE_NET_ID=$(openstack $credentials network list | grep " $TROVE_PRIVATE_NETWORK_NAME " | awk '{print $2}')
|
||||
TROVE_SUBNET_ID=$(openstack $credentials subnet list | grep " $TROVE_PRIVATE_SUBNET_NAME " | awk '{print $2}')
|
||||
# echo "Using network ${TROVE_PRIVATE_NETWORK_NAME} (${TROVE_NET_ID}): ${TROVE_PRIVATE_SUBNET_NAME} (${TROVE_SUBNET_ID})"
|
||||
# sed -i "s,%shared_network%,$TROVE_NET_ID,g" $TEST_CONF
|
||||
# sed -i "s,%shared_network_subnet%,$TROVE_SUBNET_ID,g" $TEST_CONF
|
||||
# remove the network stuff until the alt_demo network issues are sorted out
|
||||
sed -i "/%shared_network%/d" $TEST_CONF
|
||||
sed -i "/%shared_network_subnet%/d" $TEST_CONF
|
||||
echo "Using network ${TROVE_PRIVATE_NETWORK_NAME} (${TROVE_NET_ID}): ${TROVE_PRIVATE_SUBNET_NAME} (${TROVE_SUBNET_ID})"
|
||||
sed -i "s,%shared_network%,$TROVE_NET_ID,g" $TEST_CONF
|
||||
sed -i "s,%shared_network_subnet%,$TROVE_SUBNET_ID,g" $TEST_CONF
|
||||
else
|
||||
# do not leave invalid keys in the configuration when using Nova for networking
|
||||
sed -i "/%shared_network%/d" $TEST_CONF
|
||||
|
@ -340,6 +340,7 @@ class RestoreUsingBackup(object):
|
||||
instance_info.volume,
|
||||
datastore=instance_info.dbaas_datastore,
|
||||
datastore_version=instance_info.dbaas_datastore_version,
|
||||
nics=instance_info.nics,
|
||||
restorePoint=restorePoint)
|
||||
assert_equal(200, instance_info.dbaas.last_http_code)
|
||||
assert_equal("BUILD", result.status)
|
||||
@ -532,6 +533,7 @@ class FakeTestHugeBackupOnSmallInstance(BackupRestoreMixin):
|
||||
datastore=instance_info.dbaas_datastore,
|
||||
datastore_version=(instance_info.
|
||||
dbaas_datastore_version),
|
||||
nics=instance_info.nics,
|
||||
restorePoint={"backupRef": self.new_backup.id})
|
||||
assert_equal(403, instance_info.dbaas.last_http_code)
|
||||
|
||||
@ -549,6 +551,7 @@ class FakeTestHugeBackupOnSmallInstance(BackupRestoreMixin):
|
||||
datastore=instance_info.dbaas_datastore,
|
||||
datastore_version=(instance_info.
|
||||
dbaas_datastore_version),
|
||||
nics=instance_info.nics,
|
||||
restorePoint={"backupRef": self.new_backup.id})
|
||||
|
||||
assert_equal(403, instance_info.dbaas.last_http_code)
|
||||
|
@ -25,6 +25,7 @@ from proboscis import test
|
||||
from trove.common.utils import poll_until
|
||||
from trove import tests
|
||||
from trove.tests.api.instances import TIMEOUT_INSTANCE_CREATE
|
||||
from trove.tests.config import CONFIG
|
||||
from trove.tests.util.check import AttrCheck
|
||||
from trove.tests.util import create_dbaas_client
|
||||
from trove.tests.util import create_nova_client
|
||||
@ -215,6 +216,10 @@ class DatastoreFlavorAssociation(object):
|
||||
self.name2 = "test_instance2"
|
||||
self.volume = {'size': 2}
|
||||
self.instance_id = None
|
||||
self.nics = None
|
||||
shared_network = CONFIG.get('shared_network', None)
|
||||
if shared_network:
|
||||
self.nics = [{'net-id': shared_network}]
|
||||
|
||||
@test
|
||||
@time_out(TIMEOUT_INSTANCE_CREATE)
|
||||
@ -222,7 +227,8 @@ class DatastoreFlavorAssociation(object):
|
||||
# all the nova flavors are associated with the default datastore
|
||||
result = self.rd_client.instances.create(
|
||||
name=self.name1, flavor_id='1', volume=self.volume,
|
||||
datastore=self.datastore.id)
|
||||
datastore=self.datastore.id,
|
||||
nics=self.nics)
|
||||
self.instance_id = result.id
|
||||
assert_equal(200, self.rd_client.last_http_code)
|
||||
|
||||
@ -255,4 +261,5 @@ class DatastoreFlavorAssociation(object):
|
||||
assert_raises(exceptions.BadRequest,
|
||||
self.rd_client.instances.create, self.name2,
|
||||
flavor_not_associated, self.volume,
|
||||
datastore=self.datastore.id)
|
||||
datastore=self.datastore.id,
|
||||
nics=self.nics)
|
||||
|
@ -87,7 +87,10 @@ class InstanceTestInfo(object):
|
||||
self.id = None # The ID of the instance in the database.
|
||||
self.local_id = None
|
||||
self.address = None
|
||||
self.nics = None # The dict of type/id for nics used on the intance.
|
||||
self.nics = None # The dict of type/id for nics used on the instance.
|
||||
shared_network = CONFIG.get('shared_network', None)
|
||||
if shared_network:
|
||||
self.nics = [{'net-id': shared_network}]
|
||||
self.initial_result = None # The initial result from the create call.
|
||||
self.user_ip = None # The IP address of the instance, given to user.
|
||||
self.infra_ip = None # The infrastructure network IP address.
|
||||
@ -263,7 +266,8 @@ class CreateInstanceQuotaTest(unittest.TestCase):
|
||||
dbaas.instances.create,
|
||||
self.test_info.name,
|
||||
self.test_info.dbaas_flavor_href,
|
||||
self.test_info.volume)
|
||||
self.test_info.volume,
|
||||
nics=instance_info.nics)
|
||||
|
||||
def test_update_quota_invalid_resource_should_fail(self):
|
||||
quota_dict = {'invalid_resource': 100}
|
||||
@ -303,7 +307,8 @@ class CreateInstanceQuotaTest(unittest.TestCase):
|
||||
dbaas.instances.create,
|
||||
self.test_info.name,
|
||||
self.test_info.dbaas_flavor_href,
|
||||
self.test_info.volume)
|
||||
self.test_info.volume,
|
||||
nics=instance_info.nics)
|
||||
|
||||
assert_equal(413, dbaas.last_http_code)
|
||||
|
||||
@ -322,7 +327,8 @@ class CreateInstanceQuotaTest(unittest.TestCase):
|
||||
dbaas.instances.create,
|
||||
self.test_info.name,
|
||||
self.test_info.dbaas_flavor_href,
|
||||
self.test_info.volume)
|
||||
self.test_info.volume,
|
||||
nics=instance_info.nics)
|
||||
|
||||
assert_equal(413, dbaas.last_http_code)
|
||||
|
||||
@ -364,7 +370,8 @@ class CreateInstanceFail(object):
|
||||
result = dbaas.instances.create(instance_name,
|
||||
instance_info.dbaas_flavor_href,
|
||||
volume, databases,
|
||||
availability_zone="BAD_ZONE")
|
||||
availability_zone="BAD_ZONE",
|
||||
nics=instance_info.nics)
|
||||
|
||||
poll_until(self.instance_in_error(result.id))
|
||||
instance = dbaas.instances.get(result.id)
|
||||
@ -401,7 +408,8 @@ class CreateInstanceFail(object):
|
||||
volume = None
|
||||
assert_raises(exceptions.BadRequest, dbaas.instances.create,
|
||||
instance_name, '',
|
||||
volume, databases)
|
||||
volume, databases,
|
||||
nics=instance_info.nics)
|
||||
assert_equal(400, dbaas.last_http_code)
|
||||
|
||||
@test(enabled=VOLUME_SUPPORT)
|
||||
@ -411,7 +419,8 @@ class CreateInstanceFail(object):
|
||||
volume = {}
|
||||
assert_raises(exceptions.BadRequest, dbaas.instances.create,
|
||||
instance_name, instance_info.dbaas_flavor_href,
|
||||
volume, databases)
|
||||
volume, databases,
|
||||
nics=instance_info.nics)
|
||||
assert_equal(400, dbaas.last_http_code)
|
||||
|
||||
@test(enabled=VOLUME_SUPPORT)
|
||||
@ -421,7 +430,8 @@ class CreateInstanceFail(object):
|
||||
volume = {'size': None}
|
||||
assert_raises(exceptions.BadRequest, dbaas.instances.create,
|
||||
instance_name, instance_info.dbaas_flavor_href,
|
||||
volume, databases)
|
||||
volume, databases,
|
||||
nics=instance_info.nics)
|
||||
assert_equal(400, dbaas.last_http_code)
|
||||
|
||||
@test(enabled=not VOLUME_SUPPORT)
|
||||
@ -431,7 +441,8 @@ class CreateInstanceFail(object):
|
||||
volume = {'size': 2}
|
||||
assert_raises(exceptions.HTTPNotImplemented, dbaas.instances.create,
|
||||
instance_name, instance_info.dbaas_flavor_href,
|
||||
volume, databases)
|
||||
volume, databases,
|
||||
nics=instance_info.nics)
|
||||
assert_equal(501, dbaas.last_http_code)
|
||||
|
||||
def test_create_failure_with_volume_size_and_disabled_for_datastore(self):
|
||||
@ -442,7 +453,8 @@ class CreateInstanceFail(object):
|
||||
volume = {'size': 2}
|
||||
assert_raises(exceptions.HTTPNotImplemented, dbaas.instances.create,
|
||||
instance_name, instance_info.dbaas_flavor_href,
|
||||
volume, databases, datastore=datastore)
|
||||
volume, databases, datastore=datastore,
|
||||
nics=instance_info.nics)
|
||||
assert_equal(501, dbaas.last_http_code)
|
||||
|
||||
@test(enabled=EPHEMERAL_SUPPORT)
|
||||
@ -452,7 +464,8 @@ class CreateInstanceFail(object):
|
||||
flavor_name = CONFIG.values.get('instance_flavor_name', 'm1.tiny')
|
||||
flavors = dbaas.find_flavors_by_name(flavor_name)
|
||||
assert_raises(exceptions.BadRequest, dbaas.instances.create,
|
||||
instance_name, flavors[0].id, None, databases)
|
||||
instance_name, flavors[0].id, None, databases,
|
||||
nics=instance_info.nics)
|
||||
assert_equal(400, dbaas.last_http_code)
|
||||
|
||||
@test
|
||||
@ -465,7 +478,8 @@ class CreateInstanceFail(object):
|
||||
databases = []
|
||||
assert_raises(exceptions.BadRequest, dbaas.instances.create,
|
||||
instance_name, instance_info.dbaas_flavor_href,
|
||||
volume, databases)
|
||||
volume, databases,
|
||||
nics=instance_info.nics)
|
||||
assert_equal(400, dbaas.last_http_code)
|
||||
|
||||
@test
|
||||
@ -478,7 +492,8 @@ class CreateInstanceFail(object):
|
||||
databases = []
|
||||
assert_raises(exceptions.BadRequest, dbaas.instances.create,
|
||||
instance_name, instance_info.dbaas_flavor_href,
|
||||
volume, databases)
|
||||
volume, databases,
|
||||
nics=instance_info.nics)
|
||||
assert_equal(400, dbaas.last_http_code)
|
||||
|
||||
@test
|
||||
@ -517,7 +532,8 @@ class CreateInstanceFail(object):
|
||||
assert_raises(exceptions.NotFound,
|
||||
dbaas.instances.create, instance_name,
|
||||
instance_info.dbaas_flavor_href,
|
||||
volume, databases, users)
|
||||
volume, databases, users,
|
||||
nics=instance_info.nics)
|
||||
except exceptions.BadRequest as e:
|
||||
assert_equal(e.message,
|
||||
"Please specify datastore. No default datastore "
|
||||
@ -540,7 +556,8 @@ class CreateInstanceFail(object):
|
||||
dbaas.instances.create, instance_name,
|
||||
instance_info.dbaas_flavor_href,
|
||||
volume, databases, users,
|
||||
datastore=datastore)
|
||||
datastore=datastore,
|
||||
nics=instance_info.nics)
|
||||
except exceptions.BadRequest as e:
|
||||
assert_equal(e.message,
|
||||
"Default version for datastore '%s' not found." %
|
||||
@ -561,7 +578,8 @@ class CreateInstanceFail(object):
|
||||
dbaas.instances.create, instance_name,
|
||||
instance_info.dbaas_flavor_href,
|
||||
volume, databases, users,
|
||||
datastore=datastore)
|
||||
datastore=datastore,
|
||||
nics=instance_info.nics)
|
||||
except exceptions.BadRequest as e:
|
||||
assert_equal(e.message,
|
||||
"Datastore '%s' cannot be found." %
|
||||
@ -584,7 +602,8 @@ class CreateInstanceFail(object):
|
||||
instance_info.dbaas_flavor_href,
|
||||
volume, databases, users,
|
||||
datastore=datastore,
|
||||
datastore_version=datastore_version)
|
||||
datastore_version=datastore_version,
|
||||
nics=instance_info.nics)
|
||||
except exceptions.BadRequest as e:
|
||||
assert_equal(e.message,
|
||||
"Datastore version '%s' cannot be found." %
|
||||
@ -607,7 +626,8 @@ class CreateInstanceFail(object):
|
||||
instance_info.dbaas_flavor_href,
|
||||
volume, databases, users,
|
||||
datastore=datastore,
|
||||
datastore_version=datastore_version)
|
||||
datastore_version=datastore_version,
|
||||
nics=instance_info.nics)
|
||||
except exceptions.BadRequest as e:
|
||||
assert_equal(e.message,
|
||||
"Datastore version '%s' is not active." %
|
||||
@ -667,10 +687,6 @@ class CreateInstance(object):
|
||||
else:
|
||||
instance_info.volume = None
|
||||
|
||||
shared_network = CONFIG.get('shared_network', None)
|
||||
if shared_network:
|
||||
instance_info.nics = [{'net-id': shared_network}]
|
||||
|
||||
if create_new_instance():
|
||||
instance_info.initial_result = dbaas.instances.create(
|
||||
instance_info.name,
|
||||
@ -760,7 +776,8 @@ class CreateInstanceFlavors(object):
|
||||
else:
|
||||
volume = None
|
||||
self.result = dbaas.instances.create(instance_name, flavor_id, volume,
|
||||
databases)
|
||||
databases,
|
||||
nics=instance_info.nics)
|
||||
poll_until(self._result_is_active)
|
||||
self._delete_async(self.result.id)
|
||||
|
||||
@ -799,7 +816,8 @@ class CreateInstanceWithNeutron(unittest.TestCase):
|
||||
self.result = self.dbaas_client.instances.create(
|
||||
self.instance_name,
|
||||
instance_info.dbaas_flavor_href,
|
||||
volume, databases)
|
||||
volume, databases,
|
||||
nics=instance_info.nics)
|
||||
self.instance_id = self.result.id
|
||||
|
||||
def verify_instance_is_active():
|
||||
@ -1650,7 +1668,8 @@ class BadInstanceStatusBug(object):
|
||||
|
||||
result = self.client.instances.create('testbox',
|
||||
instance_info.dbaas_flavor_href,
|
||||
size)
|
||||
size,
|
||||
nics=instance_info.nics)
|
||||
id = result.id
|
||||
self.instances.append(id)
|
||||
|
||||
|
@ -50,7 +50,8 @@ class TestBase(object):
|
||||
volume = {'size': size}
|
||||
result = self.dbaas.instances.create(name,
|
||||
instance_info.dbaas_flavor_href,
|
||||
volume, [], [])
|
||||
volume, [], [],
|
||||
nics=instance_info.nics)
|
||||
return result.id
|
||||
|
||||
def wait_for_instance_status(self, instance_id, status="ACTIVE",
|
||||
|
@ -28,6 +28,7 @@ from troveclient.compat import exceptions
|
||||
from trove.common.utils import poll_until
|
||||
from trove.tests.api.instances import EPHEMERAL_SUPPORT
|
||||
from trove.tests.api.instances import VOLUME_SUPPORT
|
||||
from trove.tests.config import CONFIG
|
||||
from trove.tests.util import create_client
|
||||
from trove.tests.util import test_config
|
||||
|
||||
@ -69,8 +70,13 @@ class TestBase(object):
|
||||
volume = None
|
||||
if VOLUME_SUPPORT:
|
||||
volume = {'size': 1}
|
||||
nics = None
|
||||
shared_network = CONFIG.get('shared_network', None)
|
||||
if shared_network:
|
||||
nics = [{'net-id': shared_network}]
|
||||
initial = self.client.instances.create(self.name, self.flavor_id,
|
||||
volume, [], [])
|
||||
volume, [], [],
|
||||
nics=nics)
|
||||
self.id = initial.id
|
||||
self._wait_for_active()
|
||||
|
||||
|
@ -92,9 +92,9 @@ def create_slave():
|
||||
instance_info.name + "_slave",
|
||||
instance_info.dbaas_flavor_href,
|
||||
instance_info.volume,
|
||||
nics=instance_info.nics,
|
||||
datastore=instance_info.dbaas_datastore,
|
||||
datastore_version=instance_info.dbaas_datastore_version,
|
||||
nics=instance_info.nics,
|
||||
replica_of=instance_info.id)
|
||||
assert_equal(200, instance_info.dbaas.last_http_code)
|
||||
assert_equal("BUILD", result.status)
|
||||
@ -132,6 +132,7 @@ class CreateReplicationSlave(object):
|
||||
instance_info.volume,
|
||||
datastore=instance_info.dbaas_datastore,
|
||||
datastore_version=instance_info.dbaas_datastore_version,
|
||||
nics=instance_info.nics,
|
||||
replica_of="Missing replica source")
|
||||
assert_equal(404, instance_info.dbaas.last_http_code)
|
||||
|
||||
|
@ -521,6 +521,7 @@ class ConfigurationRunner(TestRunner):
|
||||
[], [],
|
||||
datastore=self.instance_info.dbaas_datastore,
|
||||
datastore_version=self.instance_info.dbaas_datastore_version,
|
||||
nics=self.instance_info.nics,
|
||||
availability_zone="nova",
|
||||
configuration=config_id)
|
||||
self.assert_client_code(200, client=self.auth_client)
|
||||
|
@ -162,10 +162,7 @@ class InstanceCreateRunner(TestRunner):
|
||||
instance_info.volume = {'size': trove_volume_size}
|
||||
else:
|
||||
instance_info.volume = None
|
||||
|
||||
shared_network = CONFIG.get('shared_network', None)
|
||||
if shared_network:
|
||||
instance_info.nics = [{'net-id': shared_network}]
|
||||
instance_info.nics = self.instance_info.nics
|
||||
|
||||
self.report.log("Testing create instance: %s"
|
||||
% {'name': name,
|
||||
|
@ -68,6 +68,7 @@ class ReplicationRunner(TestRunner):
|
||||
self.instance_info.volume,
|
||||
datastore=self.instance_info.dbaas_datastore,
|
||||
datastore_version=self.instance_info.dbaas_datastore_version,
|
||||
nics=self.instance_info.nics,
|
||||
locality='anti-affinity').id
|
||||
self.assert_client_code(expected_http_code, client=self.auth_client)
|
||||
|
||||
@ -143,6 +144,7 @@ class ReplicationRunner(TestRunner):
|
||||
self.instance_info.volume,
|
||||
datastore=self.instance_info.dbaas_datastore,
|
||||
datastore_version=self.instance_info.dbaas_datastore_version,
|
||||
nics=self.instance_info.nics,
|
||||
replica_of=self.non_affinity_master_id,
|
||||
replica_count=1).id
|
||||
self.assert_client_code(expected_http_code, client=self.auth_client)
|
||||
|
@ -232,6 +232,10 @@ class TestRunner(object):
|
||||
else:
|
||||
self.instance_info.volume_size = None
|
||||
self.instance_info.volume = None
|
||||
self.instance_info.nics = None
|
||||
shared_network = CONFIG.get('shared_network', None)
|
||||
if shared_network:
|
||||
self.instance_info.nics = [{'net-id': shared_network}]
|
||||
|
||||
self._auth_client = None
|
||||
self._unauth_client = None
|
||||
|
Loading…
x
Reference in New Issue
Block a user