Added a check for volume service in quota functions
When displaying the quotas, we have to check whether the volume service is present in the service catalog. If it is not, the volume quotas are removed from the result object. Fixes bug 1075319 Change-Id: Icd049a5b147cac039add1570a166d775fd0d6ec4
This commit is contained in:
parent
220f99e3c7
commit
4c88b9b597
@ -10,4 +10,12 @@
|
|||||||
|
|
||||||
<strong>{% trans "Used" %} <span> {{ usage.quotas.ram.used|intcomma }} MB </span>{% trans "of" %}<span> {{ usage.quotas.ram.quota|intcomma }} MB </span>{% trans "Available RAM" %} </strong>
|
<strong>{% trans "Used" %} <span> {{ usage.quotas.ram.used|intcomma }} MB </span>{% trans "of" %}<span> {{ usage.quotas.ram.quota|intcomma }} MB </span>{% trans "Available RAM" %} </strong>
|
||||||
{% horizon_progress_bar usage.quotas.ram.used usage.quotas.ram.quota %}
|
{% horizon_progress_bar usage.quotas.ram.used usage.quotas.ram.quota %}
|
||||||
|
|
||||||
|
{% if usage.quotas.volumes %}
|
||||||
|
<strong>{% trans "Used" %} <span> {{ usage.quotas.volumes.used|intcomma }} </span>{% trans "of" %}<span> {{ usage.quotas.volumes.quota|intcomma }} </span>{% trans "Available volumes" %} </strong>
|
||||||
|
{% horizon_progress_bar usage.quotas.volumes.used usage.quotas.volumes.quota %}
|
||||||
|
|
||||||
|
<strong>{% trans "Used" %} <span> {{ usage.quotas.gigabytes.used|intcomma }} GB </span>{% trans "of" %}<span> {{ usage.quotas.gigabytes.quota|intcomma }} GB </span>{% trans "Available volume storage" %} </strong>
|
||||||
|
{% horizon_progress_bar usage.quotas.gigabytes.used usage.quotas.gigabytes.quota %}
|
||||||
|
{% endif %}
|
||||||
</div>
|
</div>
|
||||||
|
@ -35,8 +35,11 @@ class QuotaTests(test.APITestCase):
|
|||||||
'flavor_list',
|
'flavor_list',
|
||||||
'tenant_floating_ip_list',
|
'tenant_floating_ip_list',
|
||||||
'tenant_quota_get',),
|
'tenant_quota_get',),
|
||||||
|
quotas: ('is_service_enabled',),
|
||||||
cinder: ('volume_list', 'tenant_quota_get',)})
|
cinder: ('volume_list', 'tenant_quota_get',)})
|
||||||
def test_tenant_quota_usages(self):
|
def test_tenant_quota_usages(self):
|
||||||
|
quotas.is_service_enabled(IsA(http.HttpRequest),
|
||||||
|
'volume').AndReturn(True)
|
||||||
api.nova.flavor_list(IsA(http.HttpRequest)) \
|
api.nova.flavor_list(IsA(http.HttpRequest)) \
|
||||||
.AndReturn(self.flavors.list())
|
.AndReturn(self.flavors.list())
|
||||||
api.nova.tenant_quota_get(IsA(http.HttpRequest), '1') \
|
api.nova.tenant_quota_get(IsA(http.HttpRequest), '1') \
|
||||||
@ -67,3 +70,36 @@ class QuotaTests(test.APITestCase):
|
|||||||
|
|
||||||
# Compare internal structure of usages to expected.
|
# Compare internal structure of usages to expected.
|
||||||
self.assertEquals(quota_usages.usages, expected_output)
|
self.assertEquals(quota_usages.usages, expected_output)
|
||||||
|
|
||||||
|
@test.create_stubs({api.nova: ('server_list',
|
||||||
|
'flavor_list',
|
||||||
|
'tenant_floating_ip_list',
|
||||||
|
'tenant_quota_get',),
|
||||||
|
quotas: ('is_service_enabled',)})
|
||||||
|
def test_tenant_quota_usages_without_volume(self):
|
||||||
|
quotas.is_service_enabled(IsA(http.HttpRequest),
|
||||||
|
'volume').AndReturn(False)
|
||||||
|
api.nova.flavor_list(IsA(http.HttpRequest)) \
|
||||||
|
.AndReturn(self.flavors.list())
|
||||||
|
api.nova.tenant_quota_get(IsA(http.HttpRequest), '1') \
|
||||||
|
.AndReturn(self.quotas.first())
|
||||||
|
api.nova.tenant_floating_ip_list(IsA(http.HttpRequest)) \
|
||||||
|
.AndReturn(self.floating_ips.list())
|
||||||
|
api.nova.server_list(IsA(http.HttpRequest)) \
|
||||||
|
.AndReturn(self.servers.list())
|
||||||
|
|
||||||
|
self.mox.ReplayAll()
|
||||||
|
|
||||||
|
quota_usages = quotas.tenant_quota_usages(self.request)
|
||||||
|
expected_output = {
|
||||||
|
'injected_file_content_bytes': {'quota': 1},
|
||||||
|
'metadata_items': {'quota': 1},
|
||||||
|
'injected_files': {'quota': 1},
|
||||||
|
'ram': {'available': 8976, 'used': 1024, 'quota': 10000},
|
||||||
|
'floating_ips': {'available': 0, 'used': 2, 'quota': 1},
|
||||||
|
'instances': {'available': 8, 'used': 2, 'quota': 10},
|
||||||
|
'cores': {'available': 8, 'used': 2, 'quota': 10}
|
||||||
|
}
|
||||||
|
|
||||||
|
# Compare internal structure of usages to expected.
|
||||||
|
self.assertEquals(quota_usages.usages, expected_output)
|
||||||
|
@ -52,37 +52,41 @@ class QuotaUsage(dict):
|
|||||||
self.usages[name]['available'] = available
|
self.usages[name]['available'] = available
|
||||||
|
|
||||||
|
|
||||||
def _get_quota_data(request, method_name, disabled_quotas=()):
    """Fetch quota limits and merge them into a single QuotaSet.

    Quotas are always fetched from nova; cinder is queried as well
    unless volume quotas are disabled (i.e. the volume service is not
    in the service catalog — bug 1075319).

    :param request: the current request; its user supplies the tenant id.
    :param method_name: name of the quota API call to invoke on the
        nova/cinder API modules, e.g. ``"tenant_quota_get"``.
    :param disabled_quotas: iterable of quota names to omit from the
        result; when it contains ``'volumes'`` the cinder call is
        skipped entirely.
    :returns: a ``QuotaSet`` mapping quota names to their limits.
    """
    # NOTE: the default is an immutable tuple rather than [] to avoid
    # the shared-mutable-default-argument pitfall.
    quotasets = []
    tenant_id = request.user.tenant_id
    quotasets.append(getattr(nova, method_name)(request, tenant_id))
    qs = QuotaSet()
    # Only hit the cinder API when volume quotas are wanted; the volume
    # service may be absent from the deployment.
    if 'volumes' not in disabled_quotas:
        quotasets.append(getattr(cinder, method_name)(request, tenant_id))
    for quota in itertools.chain(*quotasets):
        if quota.name not in disabled_quotas:
            qs[quota.name] = quota.limit
    return qs
|
|
||||||
|
|
||||||
def get_default_quota_data(request, disabled_quotas=()):
    """Return the default quota limits as a ``QuotaSet``.

    :param request: the current request.
    :param disabled_quotas: quota names to exclude from the result
        (immutable default instead of ``[]`` to avoid the shared
        mutable-default-argument pitfall).
    """
    return _get_quota_data(request, "default_quota_get", disabled_quotas)
|
|
||||||
|
|
||||||
def get_tenant_quota_data(request, disabled_quotas=()):
    """Return the current tenant's quota limits as a ``QuotaSet``.

    :param request: the current request; supplies the tenant.
    :param disabled_quotas: quota names to exclude from the result
        (immutable default instead of ``[]`` to avoid the shared
        mutable-default-argument pitfall).
    """
    return _get_quota_data(request, "tenant_quota_get", disabled_quotas)
|
|
||||||
|
|
||||||
@memoized
|
@memoized
|
||||||
def tenant_quota_usages(request):
|
def tenant_quota_usages(request):
|
||||||
# Get our quotas and construct our usage object.
|
# Get our quotas and construct our usage object.
|
||||||
|
disabled_quotas = []
|
||||||
|
if not is_service_enabled(request, 'volume'):
|
||||||
|
disabled_quotas.extend(['volumes', 'gigabytes'])
|
||||||
|
|
||||||
usages = QuotaUsage()
|
usages = QuotaUsage()
|
||||||
for quota in get_tenant_quota_data(request):
|
for quota in get_tenant_quota_data(request, disabled_quotas):
|
||||||
usages.add_quota(quota)
|
usages.add_quota(quota)
|
||||||
|
|
||||||
# Get our usages.
|
# Get our usages.
|
||||||
floating_ips = nova.tenant_floating_ip_list(request)
|
floating_ips = nova.tenant_floating_ip_list(request)
|
||||||
flavors = dict([(f.id, f) for f in nova.flavor_list(request)])
|
flavors = dict([(f.id, f) for f in nova.flavor_list(request)])
|
||||||
volumes = cinder.volume_list(request)
|
|
||||||
instances = nova.server_list(request)
|
instances = nova.server_list(request)
|
||||||
# Fetch deleted flavors if necessary.
|
# Fetch deleted flavors if necessary.
|
||||||
missing_flavors = [instance.flavor['id'] for instance in instances
|
missing_flavors = [instance.flavor['id'] for instance in instances
|
||||||
@ -97,8 +101,11 @@ def tenant_quota_usages(request):
|
|||||||
|
|
||||||
usages.tally('instances', len(instances))
|
usages.tally('instances', len(instances))
|
||||||
usages.tally('floating_ips', len(floating_ips))
|
usages.tally('floating_ips', len(floating_ips))
|
||||||
usages.tally('volumes', len(volumes))
|
|
||||||
|
if 'volumes' not in disabled_quotas:
|
||||||
|
volumes = cinder.volume_list(request)
|
||||||
usages.tally('gigabytes', sum([int(v.size) for v in volumes]))
|
usages.tally('gigabytes', sum([int(v.size) for v in volumes]))
|
||||||
|
usages.tally('volumes', len(volumes))
|
||||||
|
|
||||||
# Sum our usage based on the flavors of the instances.
|
# Sum our usage based on the flavors of the instances.
|
||||||
for flavor in [flavors[instance.flavor['id']] for instance in instances]:
|
for flavor in [flavors[instance.flavor['id']] for instance in instances]:
|
||||||
|
Loading…
x
Reference in New Issue
Block a user