Fix quota calculation for storage test [bug 1715333]

https://bugs.launchpad.net/kloudbuster/+bug/1715333

Only calculate the Cinder quota for the client tenant and
use the storage config's VM count and disk size (not the flavor disk size).

Add INFO logs to display the calculated disk and volume quotas.

Only look up flavors and the image once at the kloud level (instead of once per instance).
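
For illustration (the values below are hypothetical, not defaults), the client
tenant Cinder quota in storage mode now comes out of the storage stage config only:

    # sketch of the new calculation, not a verbatim code excerpt
    storage_cfg = {'vm_count': 10, 'disk_size': 20}                  # 20 GB per volume
    volumes = storage_cfg['vm_count']                                # 10 volumes
    gigabytes = storage_cfg['vm_count'] * storage_cfg['disk_size']   # 200 GB total
    # the flavor disk size no longer enters this calculation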

Change-Id: Ic92d962d6765e682e3158d7b52d7c7f3310b09bc
ahothan 2017-09-09 08:57:03 -07:00
parent 04552424c8
commit b22f59b8c9
8 changed files with 214 additions and 174 deletions


@@ -1,7 +0,0 @@
-[DEFAULT]
-test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
-             OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
-             OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \
-             ${PYTHON:-python} -m subunit.run discover -t ./ . $LISTOPT $IDOPTION
-test_id_option=--load-list $IDFILE
-test_list_option=--list


@@ -173,4 +173,3 @@
       defend, and hold each Contributor harmless for any liability
       incurred by, or claims asserted against, such Contributor by reason
       of your accepting any such warranty or additional liability.
-


@@ -2,5 +2,4 @@ include AUTHORS
 include ChangeLog
 exclude .gitignore
 exclude .gitreview
 global-exclude *.pyc
-


@@ -1,2 +1 @@
 [python: **.py]
-


@@ -16,6 +16,7 @@ import os
 import time

 import log as logging
+from novaclient.exceptions import BadRequest

 LOG = logging.getLogger(__name__)
@@ -60,15 +61,14 @@ class BaseCompute(object):
         5. Security group instance
         6. Optional parameters: availability zone, user data, config drive
         """
-        # Get the image id and flavor id from their logical names
-        image = self.find_image(image_name)
-        flavor_type = self.find_flavor(flavor_type)
+        kloud = self.network.router.user.tenant.kloud
+        image = kloud.vm_img
+        flavor = kloud.flavors[flavor_type]
         # Also attach the created security group for the test
         instance = self.novaclient.servers.create(name=self.vm_name,
                                                   image=image,
-                                                  flavor=flavor_type,
+                                                  flavor=flavor,
                                                   key_name=keyname,
                                                   nics=nic,
                                                   availability_zone=avail_zone,
@@ -118,17 +118,12 @@ class BaseCompute(object):
         if self.instance and self.vol:
             attached_vols = self.novaclient.volumes.get_server_volumes(self.instance.id)
             if len(attached_vols):
-                self.novaclient.volumes.delete_server_volume(self.instance.id, self.vol.id)
-
-    def find_image(self, image_name):
-        """
-        Given a image name return the image id
-        """
-        try:
-            image = self.novaclient.glance.find_image(image_name)
-            return image
-        except Exception:
-            return None
+                try:
+                    self.novaclient.volumes.delete_server_volume(self.instance.id, self.vol.id)
+                except BadRequest:
+                    # WARNING Some resources in client cloud are not cleaned up properly.:
+                    # BadRequest: Invalid volume: Volume must be attached in order to detach
+                    pass

     def find_flavor(self, flavor_type):
         """
@@ -287,16 +282,21 @@ class Flavor(object):
     def list(self):
         return self.novaclient.flavors.list()

-    def create_flavor(self, name, ram, vcpus, disk, ephemeral, override=False):
-        # Creating flavors
-        if override:
-            self.delete_flavor(name)
-        return self.novaclient.flavors.create(name=name, ram=ram, vcpus=vcpus,
-                                              disk=disk, ephemeral=ephemeral)
+    def create_flavor(self, flavor_dict):
+        '''Delete the old flavor with same name if any and create a new one
+
+        flavor_dict: dict with following keys: name, ram, vcpus, disk, ephemeral
+        '''
+        name = flavor_dict['name']
+        flavor = self.get(name)
+        if flavor:
+            LOG.info('Deleting old flavor %s', name)
+            self.delete_flavor(flavor)
+        LOG.info('Creating flavor %s', name)
+        return self.novaclient.flavors.create(**flavor_dict)

-    def delete_flavor(self, name):
+    def delete_flavor(self, flavor):
         try:
-            flavor = self.novaclient.flavors.find(name=name)
             flavor.delete()
         except Exception:
             pass
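
Note: the reworked Flavor.create_flavor() above is dict-based. A minimal usage
sketch (the flavor name is one of the FLAVOR_KB_* constants introduced in this
commit; the ram/vcpus/disk values here are illustrative only):

    flavor_manager = base_compute.Flavor(nova_client)
    server_flavor = {'name': 'KB.server',   # any existing flavor with this name is deleted first
                     'ram': 2048, 'vcpus': 1, 'disk': 0, 'ephemeral': 0}
    flv = flavor_manager.create_flavor(server_flavor)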


@@ -183,7 +183,7 @@ class KBRunner(object):
             else:
                 LOG.error('[%s] received invalid command: %s' + (vm_name, cmd))

-        log_msg = "%d Succeed, %d Failed, %d Pending... Retry #%d" %\
+        log_msg = "VMs: %d Ready, %d Failed, %d Pending... Retry #%d" %\
             (cnt_succ, cnt_failed, len(clist), retry)
         if sample_count != 0:
             log_msg += " (%d sample(s) received)" % sample_count


@@ -56,8 +56,17 @@ LOG = logging.getLogger(__name__)
 class KBVMCreationException(Exception):
     pass


+class KBFlavorCheckException(Exception):
+    pass
+
+
+# flavor names to use
+FLAVOR_KB_PROXY = 'KB.proxy'
+FLAVOR_KB_CLIENT = 'KB.client'
+FLAVOR_KB_SERVER = 'KB.server'
+
+
 class Kloud(object):
-    def __init__(self, scale_cfg, cred, reusing_tenants,
+    def __init__(self, scale_cfg, cred, reusing_tenants, vm_img,
                  testing_side=False, storage_mode=False, multicast_mode=False):
         self.tenant_list = []
         self.testing_side = testing_side
@@ -67,9 +76,9 @@ class Kloud(object):
         self.multicast_mode = multicast_mode
         self.credentials = cred
         self.osclient_session = cred.get_session()
-        self.flavor_to_use = None
         self.vm_up_count = 0
         self.res_logger = KBResLogger()
+        self.vm_img = vm_img
         if testing_side:
             self.prefix = 'KBc'
             self.name = 'Client Kloud'
@@ -92,8 +101,43 @@ class Kloud(object):
         LOG.info("Creating kloud: " + self.prefix)
         if self.placement_az:
             LOG.info('%s Availability Zone: %s' % (self.name, self.placement_az))
+        # A dict of flavors indexed by flavor name
+        self.flavors = {}
+
+    def select_flavor(self):
+        # Select an existing flavor that Flavor check
+        flavor_manager = base_compute.Flavor(self.nova_client)
+        find_flag = False
+        fcand = {'vcpus': sys.maxint, 'ram': sys.maxint, 'disk': sys.maxint}
+        # find the smallest flavor that is at least 1vcpu, 1024MB ram and 10MB disk
+        for flavor in flavor_manager.list():
+            flavor = vars(flavor)
+            if flavor['vcpus'] < 1 or flavor['ram'] < 1024 or flavor['disk'] < 10:
+                continue
+            if flavor['vcpus'] < fcand['vcpus']:
+                fcand = flavor
+            elif flavor['vcpus'] == fcand['vcpus']:
+                if flavor['ram'] < fcand['ram']:
+                    fcand = flavor
+                elif flavor['ram'] == fcand['ram'] and flavor['disk'] < fcand['disk']:
+                    fcand = flavor
+            find_flag = True
+        if find_flag:
+            LOG.info('Automatically selecting flavor %s to instantiate VMs.' % fcand['name'])
+            return fcand
+        LOG.error('Cannot find a flavor which meets the minimum '
+                  'requirements to instantiate VMs.')
+        raise KBFlavorCheckException()
+
     def create_resources(self, tenant_quota):
+
+        def create_flavor(fm, name, flavor_dict, extra_specs):
+            flavor_dict['name'] = name
+            flv = fm.create_flavor(flavor_dict)
+            if extra_specs:
+                flv.set_keys(extra_specs)
+            self.flavors[name] = flv
+            self.res_logger.log('flavors', vars(flv)['name'], vars(flv)['id'])
+
         if self.reusing_tenants:
             for tenant_info in self.reusing_tenants:
                 tenant_name = tenant_info['name']
@@ -112,7 +156,17 @@ class Kloud(object):
         for tenant_instance in self.tenant_list:
             tenant_instance.create_resources()

-        if not self.reusing_tenants:
+        # Create/reuse flavors for this cloud
+        if self.reusing_tenants:
+            # If tenants are reused, we do not create new flavors but pick one
+            # existing that is good enough
+            flavor = self.select_flavor()
+            if self.testing_side:
+                self.flavors[FLAVOR_KB_PROXY] = flavor
+                self.flavors[FLAVOR_KB_CLIENT] = flavor
+            else:
+                self.flavors[FLAVOR_KB_SERVER] = flavor
+        else:
             # Create flavors for servers, clients, and kb-proxy nodes
             nova_client = self.tenant_list[0].user_list[0].nova_client
             flavor_manager = base_compute.Flavor(nova_client)
@@ -125,35 +179,29 @@ class Kloud(object):
             else:
                 flavor_dict['ephemeral'] = 0
             if self.testing_side:
-                flv = flavor_manager.create_flavor('KB.proxy', override=True,
-                                                   ram=2048, vcpus=1, disk=0, ephemeral=0)
-                self.res_logger.log('flavors', vars(flv)['name'], vars(flv)['id'])
-                flv = flavor_manager.create_flavor('KB.client', override=True, **flavor_dict)
-                self.res_logger.log('flavors', vars(flv)['name'], vars(flv)['id'])
+                proxy_flavor = {
+                    "vcpus": 1,
+                    "ram": 2048,
+                    "disk": 0,
+                    "ephemeral": 0
+                }
+                create_flavor(flavor_manager, FLAVOR_KB_PROXY, proxy_flavor, extra_specs)
+                create_flavor(flavor_manager, FLAVOR_KB_CLIENT, flavor_dict, extra_specs)
             else:
-                flv = flavor_manager.create_flavor('KB.server', override=True, **flavor_dict)
-                self.res_logger.log('flavors', vars(flv)['name'], vars(flv)['id'])
-            if extra_specs:
-                flv.set_keys(extra_specs)
+                create_flavor(flavor_manager, FLAVOR_KB_SERVER, flavor_dict, extra_specs)

     def delete_resources(self):
-        # Deleting flavors created by KloudBuster
-        try:
-            nova_client = self.tenant_list[0].user_list[0].nova_client
-        except Exception:
-            # NOVA Client is not yet initialized, so skip cleaning up...
-            return True
+        if not self.reusing_tenants:
+            for fn, flavor in self.flavors.iteritems():
+                LOG.info('Deleting flavor %s', fn)
+                try:
+                    flavor.delete()
+                except Exception as exc:
+                    LOG.warning('Error deleting flavor %s: %s', fn, str(exc))

         flag = True
-        if not self.reusing_tenants:
-            flavor_manager = base_compute.Flavor(nova_client)
-            if self.testing_side:
-                flavor_manager.delete_flavor('KB.client')
-                flavor_manager.delete_flavor('KB.proxy')
-            else:
-                flavor_manager.delete_flavor('KB.server')

         for tnt in self.tenant_list:
             flag = flag & tnt.delete_resources()
@@ -256,7 +304,6 @@ class KloudBuster(object):
         self.storage_mode = storage_mode
         self.multicast_mode = multicast_mode
-
         if topology and tenants_list:
             self.topology = None
             LOG.warning("REUSING MODE: Topology configs will be ignored.")
@@ -289,6 +336,8 @@ class KloudBuster(object):
         self.fp_logfile = None
         self.kloud = None
         self.testing_kloud = None
+        self.server_vm_img = None
+        self.client_vm_img = None

     def get_hypervisor_list(self, cred):
         ret_list = []
@@ -314,64 +363,79 @@ class KloudBuster(object):
         return ret_list

-    def check_and_upload_images(self, retry_count=150):
-        image_name = self.client_cfg.image_name
-        image_url = self.client_cfg.vm_image_file
-        kloud_name_list = ['Server kloud', 'Client kloud']
-        session_list = [self.server_cred.get_session(),
-                        self.client_cred.get_session()]
-        for kloud, sess in zip(kloud_name_list, session_list):
-            glance_client = glanceclient.Client('2', session=sess)
-            try:
-                # Search for the image
-                img = glance_client.images.list(filters={'name': image_name}).next()
-                continue
-            except StopIteration:
-                sys.exc_clear()
-
-            # Trying to upload images
-            LOG.info("KloudBuster VM Image is not found in %s, trying to upload it..." % kloud)
-            if not image_url:
-                LOG.error('Configuration file is missing a VM image pathname (vm_image_name)')
-                return False
-            retry = 0
-            try:
-                LOG.info("Uploading VM Image from %s..." % image_url)
-                with open(image_url) as f_image:
-                    img = glance_client.images.create(name=image_name,
-                                                      disk_format="qcow2",
-                                                      container_format="bare",
-                                                      visibility="public")
-                    glance_client.images.upload(img.id, image_data=f_image)
-                # Check for the image in glance
-                while img.status in ['queued', 'saving'] and retry < retry_count:
-                    img = glance_client.images.get(img.id)
-                    retry += 1
-                    LOG.debug("Image not yet active, retrying %s of %s...", retry, retry_count)
-                    time.sleep(2)
-                if img.status != 'active':
-                    LOG.error("Image uploaded but too long to get to active state")
-                    raise Exception("Image update active state timeout")
-            except glance_exception.HTTPForbidden:
-                LOG.error("Cannot upload image without admin access. Please make "
-                          "sure the image is uploaded and is either public or owned by you.")
-                return False
-            except IOError as exc:
-                # catch the exception for file based errors.
-                LOG.error("Failed while uploading the image. Please make sure the "
-                          "image at the specified location %s is correct: %s",
-                          image_url, str(exc))
-                return False
-            except keystoneauth1.exceptions.http.NotFound as exc:
-                LOG.error("Authentication error while uploading the image: " + str(exc))
-                return False
-            except Exception:
-                LOG.error(traceback.format_exc())
-                LOG.error("Failed while uploading the image: %s", str(exc))
-                return False
-
-        return True
+    def check_and_upload_image(self, kloud_name, image_name, image_url, sess, retry_count):
+        '''Check a VM image and upload it if not found
+        '''
+        glance_client = glanceclient.Client('2', session=sess)
+        try:
+            # Search for the image
+            img = glance_client.images.list(filters={'name': image_name}).next()
+            # image found
+            return img
+        except StopIteration:
+            sys.exc_clear()
+
+        # Trying to upload image
+        LOG.info("KloudBuster VM Image is not found in %s, trying to upload it..." % kloud_name)
+        if not image_url:
+            LOG.error('Configuration file is missing a VM image pathname (vm_image_name)')
+            return None
+        retry = 0
+        try:
+            LOG.info("Uploading VM Image from %s..." % image_url)
+            with open(image_url) as f_image:
+                img = glance_client.images.create(name=image_name,
+                                                  disk_format="qcow2",
+                                                  container_format="bare",
+                                                  visibility="public")
+                glance_client.images.upload(img.id, image_data=f_image)
+            # Check for the image in glance
+            while img.status in ['queued', 'saving'] and retry < retry_count:
+                img = glance_client.images.get(img.id)
+                retry += 1
+                LOG.debug("Image not yet active, retrying %s of %s...", retry, retry_count)
+                time.sleep(2)
+            if img.status != 'active':
+                LOG.error("Image uploaded but too long to get to active state")
+                raise Exception("Image update active state timeout")
+        except glance_exception.HTTPForbidden:
+            LOG.error("Cannot upload image without admin access. Please make "
+                      "sure the image is uploaded and is either public or owned by you.")
+            return None
+        except IOError as exc:
+            # catch the exception for file based errors.
+            LOG.error("Failed while uploading the image. Please make sure the "
+                      "image at the specified location %s is correct: %s",
+                      image_url, str(exc))
+            return None
+        except keystoneauth1.exceptions.http.NotFound as exc:
+            LOG.error("Authentication error while uploading the image: " + str(exc))
+            return None
+        except Exception:
+            LOG.error(traceback.format_exc())
+            LOG.error("Failed while uploading the image: %s", str(exc))
+            return None
+        return img
+
+    def check_and_upload_images(self, retry_count=150):
+        image_name = self.client_cfg.image_name
+        image_url = self.client_cfg.vm_image_file
+        self.server_vm_img = self.check_and_upload_image('Server kloud',
+                                                         image_name,
+                                                         image_url,
+                                                         self.server_cred.get_session(),
+                                                         retry_count)
+        if self.server_vm_img is None:
+            return False
+        if self.client_cred == self.server_cred:
+            self.client_vm_img = self.server_vm_img
+        else:
+            self.client_vm_img = self.check_and_upload_image('Client kloud',
+                                                             image_name,
+                                                             image_url,
+                                                             self.client_cred.get_session(),
+                                                             retry_count)
+        return self.client_vm_img is not None

     def print_provision_info(self):
         """
@@ -408,8 +472,7 @@ class KloudBuster(object):
             for ins in server_list:
                 ins.user_data['role'] = 'HTTP_Server'
                 ins.user_data['http_server_configs'] = ins.config['http_server_configs']
-                ins.boot_info['flavor_type'] = 'KB.server' if \
-                    not self.tenants_list['server'] else self.kloud.flavor_to_use
+                ins.boot_info['flavor_type'] = FLAVOR_KB_SERVER
                 ins.boot_info['user_data'] = str(ins.user_data)
         elif test_mode == 'multicast':
             # Nuttcp tests over first /25
@@ -433,8 +496,7 @@ class KloudBuster(object):
                 ins.user_data['multicast_listener_address_start'] = listener_addr_start
                 ins.user_data['ntp_clocks'] = clocks
                 ins.user_data['pktsizes'] = self.client_cfg.multicast_tool_configs.pktsizes
-                ins.boot_info['flavor_type'] = 'KB.server' if \
-                    not self.tenants_list['server'] else self.kloud.flavor_to_use
+                ins.boot_info['flavor_type'] = FLAVOR_KB_SERVER
                 ins.boot_info['user_data'] = str(ins.user_data)

     def gen_client_user_data(self, test_mode):
@@ -457,8 +519,7 @@ class KloudBuster(object):
                 ins.user_data['target_shared_interface_ip'] = server_list[idx].shared_interface_ip
                 if role == 'Multicast_Client':
                     ins.user_data['ntp_clocks'] = clocks
-                ins.boot_info['flavor_type'] = 'KB.client' if \
-                    not self.tenants_list['client'] else self.testing_kloud.flavor_to_use
+                ins.boot_info['flavor_type'] = FLAVOR_KB_CLIENT
                 ins.boot_info['user_data'] = str(ins.user_data)
         else:
             for idx, ins in enumerate(client_list):
@@ -466,8 +527,7 @@ class KloudBuster(object):
                 ins.user_data['vm_name'] = ins.vm_name
                 ins.user_data['redis_server'] = self.kb_proxy.fixed_ip
                 ins.user_data['redis_server_port'] = 6379
-                ins.boot_info['flavor_type'] = 'KB.client' if \
-                    not self.tenants_list['client'] else self.testing_kloud.flavor_to_use
+                ins.boot_info['flavor_type'] = FLAVOR_KB_CLIENT
                 ins.boot_info['user_data'] = str(ins.user_data)

     def gen_metadata(self):
@@ -505,12 +565,15 @@ class KloudBuster(object):
         tenant_quota = self.calc_tenant_quota()
         if not self.storage_mode:
             self.kloud = Kloud(self.server_cfg, self.server_cred, self.tenants_list['server'],
+                               self.server_vm_img,
                                storage_mode=self.storage_mode, multicast_mode=self.multicast_mode)
             self.server_vm_create_thread = threading.Thread(target=self.kloud.create_vms,
                                                             args=[vm_creation_concurrency])
             self.server_vm_create_thread.daemon = True
         self.testing_kloud = Kloud(self.client_cfg, self.client_cred,
-                                   self.tenants_list['client'], testing_side=True,
+                                   self.tenants_list['client'],
+                                   self.client_vm_img,
+                                   testing_side=True,
                                    storage_mode=self.storage_mode,
                                    multicast_mode=self.multicast_mode)
         self.client_vm_create_thread = threading.Thread(target=self.testing_kloud.create_vms,
@@ -528,8 +591,7 @@ class KloudBuster(object):
         self.kb_proxy.vm_name = 'KB-PROXY'
         self.kb_proxy.user_data['role'] = 'KB-PROXY'
-        self.kb_proxy.boot_info['flavor_type'] = 'KB.proxy' if \
-            not self.tenants_list['client'] else self.testing_kloud.flavor_to_use
+        self.kb_proxy.boot_info['flavor_type'] = FLAVOR_KB_PROXY
         if self.topology:
             proxy_hyper = self.topology.clients_rack[0]
             self.kb_proxy.boot_info['avail_zone'] = \
@@ -661,6 +723,7 @@ class KloudBuster(object):
         self.fp_logfile = None

     def get_tenant_vm_count(self, config):
+        # this does not apply for storage mode!
         return (config['routers_per_tenant'] * config['networks_per_router'] *
                 config['vms_per_network'])
@@ -721,35 +784,50 @@ class KloudBuster(object):
         return [server_quota, client_quota]

     def calc_nova_quota(self):
-        total_vm = self.get_tenant_vm_count(self.server_cfg)
         server_quota = {}
-        server_quota['instances'] = total_vm
-        server_quota['cores'] = total_vm * self.server_cfg['flavor']['vcpus']
-        server_quota['ram'] = total_vm * self.server_cfg['flavor']['ram']
         client_quota = {}
-        total_vm = self.get_tenant_vm_count(self.client_cfg)
+        if self.storage_mode:
+            # in case of storage, the number of VMs is to be taken from the
+            # the storage config
+            total_vm = self.client_cfg['storage_stage_configs']['vm_count']
+        else:
+            total_vm = self.get_tenant_vm_count(self.server_cfg)
+        server_quota['instances'] = total_vm
+        server_quota['cores'] = total_vm * self.server_cfg['flavor']['vcpus']
+        server_quota['ram'] = total_vm * self.server_cfg['flavor']['ram']
+        LOG.info('Server tenant Nova quotas: instances=%d vcpus=%d ram=%dMB',
+                 server_quota['instances'],
+                 server_quota['cores'],
+                 server_quota['ram'])
+
+        total_vm = self.get_tenant_vm_count(self.client_cfg)
+        # add 1 for the proxy
         client_quota['instances'] = total_vm + 1
         client_quota['cores'] = total_vm * self.client_cfg['flavor']['vcpus'] + 1
         client_quota['ram'] = total_vm * self.client_cfg['flavor']['ram'] + 2048
+        LOG.info('Client tenant Nova quotas: instances=%d vcpus=%d ram=%dMB',
+                 client_quota['instances'],
+                 client_quota['cores'],
+                 client_quota['ram'])
         return [server_quota, client_quota]

     def calc_cinder_quota(self):
-        total_vm = self.get_tenant_vm_count(self.server_cfg)
-        svr_disk = self.server_cfg['flavor']['disk']
+        # Cinder quotas are only set for storage mode
+        # Since storage mode only uses client tenant
+        # Server tenant cinder quota is only used for non-storage case
+        # we can leave the server quota empty
         server_quota = {}
-        server_quota['gigabytes'] = total_vm * svr_disk \
-            if svr_disk != 0 else -1
-        server_quota['volumes'] = total_vm

-        total_vm = self.get_tenant_vm_count(self.client_cfg)
-        clt_disk = self.client_cfg['flavor']['disk']
+        # Client tenant quota is based on the number of
+        # storage VMs and disk size per VM
+        # (note this is not the flavor disk size!)
         client_quota = {}
-        client_quota['gigabytes'] = total_vm * clt_disk + 20 \
-            if clt_disk != 0 else -1
-        client_quota['volumes'] = total_vm
+        if self.storage_mode:
+            storage_cfg = self.client_cfg['storage_stage_configs']
+            vm_count = storage_cfg['vm_count']
+            client_quota['gigabytes'] = vm_count * storage_cfg['disk_size']
+            client_quota['volumes'] = vm_count
+            LOG.info('Cinder quotas: volumes=%d storage=%dGB', vm_count, client_quota['gigabytes'])
         return [server_quota, client_quota]

     def calc_tenant_quota(self):


@@ -18,14 +18,10 @@ import base_storage
 from keystoneclient import exceptions as keystone_exception
 import log as logging
-import sys
 import users

 LOG = logging.getLogger(__name__)


-class KBFlavorCheckException(Exception):
-    pass
-
 class KBQuotaCheckException(Exception):
     pass
@@ -99,30 +95,6 @@ class Tenant(object):
         neutron_quota.update_quota(self.tenant_quota['neutron'])

     def check_quota(self):
-        # Flavor check
-        flavor_manager = base_compute.Flavor(self.kloud.nova_client)
-        find_flag = False
-        fcand = {'vcpus': sys.maxint, 'ram': sys.maxint, 'disk': sys.maxint}
-        for flavor in flavor_manager.list():
-            flavor = vars(flavor)
-            if flavor['vcpus'] < 1 or flavor['ram'] < 1024 or flavor['disk'] < 10:
-                continue
-            if flavor['vcpus'] < fcand['vcpus']:
-                fcand = flavor
-            if flavor['vcpus'] == fcand['vcpus'] and flavor['ram'] < fcand['ram']:
-                fcand = flavor
-            if flavor['vcpus'] == fcand['vcpus'] and flavor['ram'] == fcand['ram'] and\
-                    flavor['disk'] < fcand['disk']:
-                fcand = flavor
-            find_flag = True
-        if find_flag:
-            LOG.info('Automatically selects flavor %s to instantiate VMs.' % fcand['name'])
-            self.kloud.flavor_to_use = fcand['name']
-        else:
-            LOG.error('Cannot find a flavor which meets the minimum '
-                      'requirements to instantiate VMs.')
-            raise KBFlavorCheckException()
-
         # Nova/Cinder/Neutron quota check
         tenant_id = self.tenant_id