Sync distil project structure with internal one
The commit history will be lost in this sync, so it is recorded here:

db927106e7362f25384b1e7113d92538ba100f38 Remove residual oerplib dependency
42d8a04a8c66299c4c4c58643c89f043ee3b3f13 Fix import error
202937f0d8b4f3dd5f18e989a6e2dfb3a633fa8c Switch oerplib to odoorpc
2e307f84e5f8e93da67fd4c32683c246237c864d Add update-quote function
0c9d8c29ae112f9758347eba153e1fb5fed8b482 Set the correct sales team
df26990838c0d7b0c6721825989706d07747e92a Support traffic billing for odoo script
2c75433a29fd5c42a2a29b41617b5fa4e42103b4 Add retry support and audit
97aadd2179496255c06f5c1c2a9368500ea74588 packaging: python-distil depends on client
021ebf0e21a6e42ed853b507fb4f16daaad14b49 Bump changelog to 0.5.10
604b906afac25c9c64862799a0d12565db2064a5 Fix network service tranform issue
956c8368435bd9ff3bba45bdde18fb72102a9d5c Skip traffic data
a404539dd7d0fa2e19fd2f5536bbd0c18b3bc468 Prep for Odoo migration
f10743440375cdc5512d0c4fff8b239cbdf42e0d Fix changes for Debian packaging
b76536576e63848aeaa29326a0bac2cf8092e935 Bump version number and add myself to uploaders
7af68dcc4b521e5851a5f18cc647f4312bfd7fa4 fixing an error if counter volume is None
f7ec36305e174cd3b1c77cf96fd9025e7774c420 Fix the multithread issue of strptime
1b16e9bfb4d5e97bd071569d3bca864d6b236cca add debian packaging
9149898015c8d4f859b316a85608bea905a3ca62 Support dynamic source
4dd95ba8be2d8d8a58bb0d6a0ba80cadfb54dd75 dropping near zero entries
766bee76234ffcf71cd0cdd13e1e3f677389390e Merge multi region in odoo-glue
eddc940dbe9cdbcdf9109e3553ffe65ad80c1496 Make it free for one network and one router
2bb70fe2ecddde3b5db1fa64fc1416bc8835ee93 Don't read rates file everytime
accf74e36519a116acf909cab85cd070dba998a0 version bump
7035605a9ff1e42a1120ee438dfe6de413bf2b76 fetch resource metadata in bulk when handling get_usage/get_rated.
5c6f7c0831b9560d85ce33e690c6d299470b7de8 version bump to 0.5.2
5a6233e18aa0e79bf6504f8fbe9bd04d7b4afa39 update rates
6f985422ea42f7ac545ed66e62a18760f14e3913 update odoo glue scripts for distilclient-0.5.1 api break
33c54fe6fc892d4b5621170f5e31ee9df63ed287 Tweak distilclient packaging to not require custom PYTHONPATH
dc22431354ff96b02fb4def6b010575cc1f1be72 get rid of product-name-in-line-description kludge
10925492548183749cac77202d71e38291bb5440 setting up memcache
1e106cf1e012102a3cd49af543401826e67eb6a9 version bump to 0.5.0
ac29b39c629b466dcf2e04655b0491d5eb00040b Second uptime transformer for instance metric
10e37ad6b4c222efbfe0f88261ecd4fb89df93e4 adding another state to billable states
0e647ead7f6a990d279211caf2993f963372032d client: Rename some functions for clarity
7280ed585b0524c4376ef89116210f7c68ae94e4 Enable the tox for Distil
6daafca0a9f0b2a8688b9906fbea320eb4d40b2f Removing sales-order functionality from the api
c12a41d5b67eb8b95abcfbc2d9eeb96cc56e8b2c version bump to 0.4.3
af86e1af31efb14ff6dac39f0c3b642fd4a4e0c4 distil: Region awareness for nova and cinder lookups
bd71c7106093d2a178417f042201e8ee68ac9929 odoo: Add glue script to set current prices in distil
5a8b1596f4f08c2ea060d0da9dac844d6ae8039d odoo: Make region-aware
540eb5234f139bf9c51eb60571a81bda25cd9dd7 Import odoo-glue tree into odoo/
8ed7c74f91d2ffd6c0b1ac50274e238f9e020502 Minor fix to tenant_validation
fed3c8d4d86e09750ac3fa324bded977abe698f5 Require admin or owner
bffebdf974f7e1aad210ff99040424a8f2da19f2 adding missing package depends for cinderclient
6198f1ec1fdbe0390a43571320159317e7dffa27 add toplevel

Change-Id: I81581ca2aaee006daad2c1071068c651865ebb32
This commit is contained in:
parent 68ef75981d
commit 3149d560c1
.gitignore (vendored, 8 changes)
@@ -32,7 +32,15 @@ bin/pybabel
bin/python
bin/python2
bin/python2.7
.project
.pydevproject
.testrepository/
.tox/
.venv/
bin/logs/
bin/waitress-serve
local/
test_vm/
env/
ChangeLog
.idea
Makefile (4 changes)
@@ -1,5 +1,5 @@

VERSION=0.4.2
VERSION=0.5.3
NAME=distil
INSTALL_PATH=/opt/stack/distil
BINARY_PATH=/opt/stack/distil
@@ -41,6 +41,7 @@ deb: clean init
--depends python-virtualenv \
--depends python-sqlalchemy \
--depends python-keystoneclient \
--depends python-cinderclient \
--depends python-requests \
--depends python-flask \
--depends python-novaclient \
@@ -48,6 +49,7 @@ deb: clean init
--depends python-mysqldb \
--depends python-psycopg2 \
--depends python-yaml \
--depends python-memcache \
--template-scripts \
--template-value install_path=${INSTALL_PATH} \
-C ${WORK_DIR} \
@@ -1,6 +1,5 @@
#!/bin/bash

DISTILPATH=/opt/stack/distilclient
DISTILPATH=/usr/lib/python2.7/dist-packages/distilclient

export PYTHONPATH=${DISTILPATH}:${PYTHONPATH}
python ${DISTILPATH}/client/shell.py "$@"
python ${DISTILPATH}/shell.py "$@"
@@ -1,7 +1,7 @@

VERSION=0.4.2
VERSION=0.5.3
NAME=distilclient
INSTALL_PATH=/opt/stack/distilclient
INSTALL_PATH=/usr/lib/python2.7/dist-packages/distilclient
BINARY_PATH=/usr/local/bin

WORK_DIR=./work-client
@@ -18,9 +18,10 @@ init:
@mkdir -p ${WORK_DIR}${BINARY_PATH}

deb: clean init
@mkdir -p ${WORK_DIR}${INSTALL_PATH}/client
@mkdir -p ${WORK_DIR}${INSTALL_PATH}
@cp ./bin/distil ${WORK_DIR}${BINARY_PATH}/distil
@cp -r ./client/*.py ${WORK_DIR}${INSTALL_PATH}/client/
@cp -r ./client/*.py ${WORK_DIR}${INSTALL_PATH}/
@cp __init__.py ${WORK_DIR}${INSTALL_PATH}/
@chmod 0755 ${WORK_DIR}${BINARY_PATH}/distil
@fpm -s dir -t deb -n ${NAME} -v ${VERSION} \
--depends python2.7 \
@@ -16,7 +16,6 @@ import requests
from keystoneclient.v2_0.client import Client as Keystone
from requests.exceptions import ConnectionError
from urlparse import urljoin
import json


class Client(object):
@@ -55,7 +54,7 @@ class Client(object):
            endpoint_type=os_endpoint_type
        )

    def usage(self):
    def collect_usage(self):
        url = urljoin(self.endpoint, "collect_usage")

        headers = {"Content-Type": "application/json",
@@ -90,17 +89,20 @@ class Client(object):
            print e

    def get_usage(self, tenant, start, end):
        url = urljoin(self.endpoint, "get_usage")
        return self._query_usage(tenant, start, end, "get_usage")

        headers = {
            "X-Auth-Token": self.auth_token
        }
    def get_rated(self, tenant, start, end):
        return self._query_usage(tenant, start, end, "get_rated")

        params = {
            "tenant": tenant,
            "start": start,
            "end": end
        }
    def _query_usage(self, tenant, start, end, endpoint):
        url = urljoin(self.endpoint, endpoint)

        headers = {"X-Auth-Token": self.auth_token}

        params = {"tenant": tenant,
                  "start": start,
                  "end": end
                  }

        try:
            response = requests.get(url, headers=headers,
@@ -108,52 +110,8 @@ class Client(object):
                                    verify=not self.insecure)
            if response.status_code != 200:
                raise AttributeError("Get usage failed: %s code: %s" %
                                     (response.text, response.status_code))
                                     (response.text, response.status_code))
            else:
                return response.json()
        except ConnectionError as e:
            print e

    def _sales_order_query(self, tenants, relative_url, make_data):
        url = urljoin(self.endpoint, relative_url)

        headers = {"Content-Type": "application/json",
                   "X-Auth-Token": self.auth_token}

        tenants_resp = {'sales_orders': [], 'errors': {}}
        for tenant in tenants:
            data = make_data(tenant)
            try:
                response = requests.post(url, headers=headers,
                                         data=json.dumps(data),
                                         verify=not self.insecure)
                if response.status_code != 200:
                    error = ("Sales order cycle failed: %s Code: %s" %
                             (response.text, response.status_code))
                    tenants_resp['errors'][tenant] = error
                else:
                    tenants_resp['sales_orders'].append(response.json())
            except ConnectionError as e:
                print e
        return tenants_resp

    def sales_order(self, tenants, end, draft):
        return self._sales_order_query(
            tenants,
            'sales_draft' if draft else 'sales_order',
            lambda tenant: {'tenant': tenant, 'end': end}
        )

    def sales_historic(self, tenants, date):
        return self._sales_order_query(
            tenants,
            'sales_historic',
            lambda tenant: {'tenant': tenant, 'date': date}
        )

    def sales_range(self, tenants, start, end):
        return self._sales_order_query(
            tenants,
            'sales_range',
            lambda tenant: {'tenant': tenant, 'start': start, 'end': end}
        )
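Editorial note, not part of the diff: the client change above collapses the two usage queries into one helper. A minimal standalone sketch of the resulting call pattern, with a hypothetical endpoint URL and token:

# Illustrative sketch only; ENDPOINT and TOKEN are made-up values.
import requests
from urlparse import urljoin  # Python 2, matching the client

ENDPOINT = "https://distil.example.com/"
TOKEN = "an-auth-token"

def _query_usage(tenant, start, end, endpoint):
    # shared request path; only the endpoint name differs between callers
    url = urljoin(ENDPOINT, endpoint)
    params = {"tenant": tenant, "start": start, "end": end}
    response = requests.get(url, headers={"X-Auth-Token": TOKEN}, params=params)
    return response.json()

def get_usage(tenant, start, end):
    return _query_usage(tenant, start, end, "get_usage")

def get_rated(tenant, start, end):
    return _query_usage(tenant, start, end, "get_rated")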
@@ -110,58 +110,22 @@ if __name__ == '__main__':
        help="End time",
        required=True)

    sales_parser = subparsers.add_parser(
        'sales-order',
        help=('create sales orders for given tenants'))
    sales_parser.add_argument(
        "-t", "--tenant", dest="tenants",
        help='Tenants to create sales orders for.',
        action="append", default=[],
        required=True)
    sales_parser.add_argument(
        "-e", "--end", dest="end",
        help='end date for sales order.')
    get_rated_parser = subparsers.add_parser(
        'get-rated', help=('get rated usage'))

    draft_parser = subparsers.add_parser(
        'sales-draft',
        help=('create sales drafts for given tenants'))
    draft_parser.add_argument(
        "-t", "--tenant", dest="tenants",
        help='Tenants to create sales drafts for.',
        action="append", required=True)
    draft_parser.add_argument(
        "-e", "--end", dest="end",
        help='end date for sales order.')

    historic_parser = subparsers.add_parser(
        'sales-historic',
        help=('regenerate historic sales orders for given tenants,' +
              'at given date'))
    historic_parser.add_argument(
        "-t", "--tenant", dest="tenants",
        help='Tenants to create sales drafts for.',
        action="append", required=True)
    historic_parser.add_argument(
        "-d", "--date", dest="date",
        help='target search date for sales order.',
    get_rated_parser.add_argument(
        "-t", "--tenant", dest="tenant",
        help='Tenant to get usage for',
        required=True)

    range_parser = subparsers.add_parser(
        'sales-range',
        help=('regenerate historic sales orders for given tenants,' +
              'in a given range'))
    range_parser.add_argument(
        "-t", "--tenant", dest="tenants",
        help='Tenants to create sales drafts for.',
        action="append", required=True)
    range_parser.add_argument(
    get_rated_parser.add_argument(
        "-s", "--start", dest="start",
        help='start of range for sales orders.',
        help="Start time",
        required=True)
    range_parser.add_argument(

    get_rated_parser.add_argument(
        "-e", "--end", dest="end",
        help='end of range for sales orders. Defaults to now.',
        default=None)
        help="End time")

    args = parser.parse_args()

@@ -202,7 +166,7 @@ if __name__ == '__main__':
        kwargs.get('os_endpoint_type', None))

    if args.command == 'collect-usage':
        response = client.usage()
        response = client.collect_usage()
        print json.dumps(response, indent=2)

    if args.command == 'last-collected':
@@ -213,18 +177,6 @@ if __name__ == '__main__':
        response = client.get_usage(args.tenant, args.start, args.end)
        print json.dumps(response, indent=2)

    if args.command == 'sales-order':
        response = client.sales_order(args.tenants, args.end, False)
        print json.dumps(response, indent=2)

    if args.command == 'sales-draft':
        response = client.sales_order(args.tenants, args.end, True)
        print json.dumps(response, indent=2)

    if args.command == 'sales-historic':
        response = client.sales_historic(args.tenants, args.date)
        print json.dumps(response, indent=2)

    if args.command == 'sales-range':
        response = client.sales_range(args.tenants, args.start, args.end)
    if args.command == 'get-rated':
        response = client.get_rated(args.tenant, args.start, args.end)
        print json.dumps(response, indent=2)
debian/.gitignore (vendored, new file, 7 lines)
@@ -0,0 +1,7 @@
files
*.debhelper.log
*.postinst.debhelper
*.prerm.debhelper
*.substvars
python-distil/
python-distilclient
debian/changelog (vendored, new file, 57 lines)
@@ -0,0 +1,57 @@
distil (0.5.11) unstable; urgency=medium

  [Xav Paice]
  * Add a versioned recommends for distilclient on distil to keep them
    synchronised.

 -- Andrew Ruthven <puck@catalyst.net.nz>  Mon, 07 Sep 2015 22:20:42 +1200

distil (0.5.10) unstable; urgency=medium

  * Prep for Odoo migration
  * Skip traffic data
  * Fix network service transform

 -- Andrew Ruthven <puck@catalyst.net.nz>  Fri, 04 Sep 2015 15:36:15 +1200

distil (0.5.9) unstable; urgency=medium

  * Install into /usr/lib, not /usr/usr/lib.

 -- Andrew Ruthven <puck@catalyst.net.nz>  Wed, 29 Jul 2015 22:50:20 +1200

distil (0.5.8) unstable; urgency=medium

  * Fix dependencies.

 -- Andrew Ruthven <puck@catalyst.net.nz>  Wed, 29 Jul 2015 00:43:20 +1200

distil (0.5.7) unstable; urgency=medium

  * Actually deploy content in python-distil.

 -- Andrew Ruthven <puck@catalyst.net.nz>  Wed, 29 Jul 2015 00:29:27 +1200

distil (0.5.6) unstable; urgency=medium

  * Add provides and conflicts fields for old package names.

 -- Andrew Ruthven <puck@catalyst.net.nz>  Tue, 28 Jul 2015 21:14:01 +1200

distil (0.5.5) unstable; urgency=medium

  * Bump the version.

 -- Andrew Ruthven <puck@catalyst.net.nz>  Tue, 28 Jul 2015 17:29:00 +1200

distil (0.5.4) unstable; urgency=medium

  * Bump the version.

 -- Andrew Ruthven <puck@catalyst.net.nz>  Tue, 28 Jul 2015 11:08:35 +1200

distil (0.5.3) unstable; urgency=low

  * source package automatically created by stdeb 0.6.0+git

 -- OpenStack <openstack-dev@lists.openstack.org>  Wed, 01 Jul 2015 11:32:23 +1200
debian/compat (vendored, new file, 1 line)
@@ -0,0 +1 @@
7
debian/control (vendored, new file, 43 lines)
@@ -0,0 +1,43 @@
Source: distil
Maintainer: OpenStack <openstack-dev@lists.openstack.org>
Uploaders: Andrew Ruthven <puck@catalyst.net.nz>
Section: python
Priority: optional
Build-Depends: python-setuptools (>= 0.6b3), python-all (>= 2.6.6-3), debhelper (>= 7), dh-python, python-yaml
Standards-Version: 3.9.1
X-Python-Version: >= 2.7

Package: python-distil
Architecture: all
Depends:
 ${misc:Depends},
 ${python:Depends}
Recommends: python-distilclient (= ${binary:Version})
Provides: distil
Replaces: distil
Conflicts: distil
Description: Distil project
 Distil is a web app to provide easy interactions with ERP systems, by
 exposing a configurable set of collection tools and transformers to make
 usable billing data out of Ceilometer entries.
 .
 Distil provides a rest api to integrate with arbitrary ERP systems, and
 returns sales orders as json. What the ranges are, and how Ceilometer data
 is aggregated is intended to be configurable, and defined in the configuration
 file.
 .
 The Distil data store will prevent overlapping bills for a given tenant and
 resource ever being stored, while still allowing for regeneration of a given

Package: python-distilclient
Architecture: all
Depends: ${misc:Depends}, ${python:Depends}, python2.7, python-keystoneclient, python-requests
Provides: distilclient
Replaces: distilclient
Conflicts: distilclient
Description: Client interface for Distil project
 Distil is a web app to provide easy interactions with ERP systems, by
 exposing a configurable set of collection tools and transformers to make
 usable billing data out of Ceilometer entries.
 .
 This package provides a client to interact with the Distil web app.
debian/pydist-overrides (vendored, new file, 2 lines)
@@ -0,0 +1,2 @@
pyaml python-yaml
PyMySQL python-mysqldb
debian/python-distil.install (vendored, new file, 2 lines)
@@ -0,0 +1,2 @@
usr/lib /usr
work-api/etc/distil/ /etc
debian/python-distilclient.install (vendored, new file, 2 lines)
@@ -0,0 +1,2 @@
bin/distil usr/bin
client/*.py /usr/lib/python2.7/dist-packages/distilclient
debian/rules (vendored, new executable file, 7 lines)
@@ -0,0 +1,7 @@
#!/usr/bin/make -f

# This file was automatically generated by stdeb 0.6.0+git at
# Wed, 01 Jul 2015 11:32:23 +1200

%:
	dh "$@" --with python2 --buildsystem=python_distutils
debian/source/format (vendored, new file, 1 line)
@@ -0,0 +1 @@
3.0 (native)
debian/source/options (vendored, new file, 1 line)
@@ -0,0 +1 @@
extend-diff-ignore="\.egg-info"
distil/NoPickle.py (new file, 16 lines)
@@ -0,0 +1,16 @@


class NoPickling(BaseException):
    """Should not be pickling"""


class NoPickle(object):

    def __init__(self, *args, **kwargs):
        pass

    def dump(self, value):
        raise NoPickling("Pickling is not allowed!")

    def load(self, value):
        raise NoPickling("Unpickling is not allowed!")
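Editorial note, not part of the diff: a minimal sketch of how NoPickle is wired in later in this commit. python-memcache falls back to pickle for non-string values, so passing NoPickle as both pickler and unpickler turns any accidental pickling into a hard error; only pre-serialised JSON strings ever reach the cache. The memcache address below is a made-up example.

import memcache
from distil.NoPickle import NoPickle

# construct the client exactly as setup_memcache() does, address is hypothetical
mc = memcache.Client(["127.0.0.1:11211"],
                     pickler=NoPickle, unpickler=NoPickle)
mc.set("distil_key", '{"usage": {}}')   # fine: value is already a JSON string
mc.set("distil_key", {"usage": {}})     # raises NoPickling instead of silently pickling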
@@ -45,9 +45,17 @@ def must(*args, **kwargs):

@decorator
def returns_json(func, *args, **kwargs):
    """Dumps content into a json and makes a response.
    NOTE: If content is already a string assumes it is json."""
    status, content = func(*args, **kwargs)

    if isinstance(content, str):
        content_json = content
    else:
        content_json = json.dumps(content)

    response = flask.make_response(
        json.dumps(content), status)
        content_json, status)
    response.headers['Content-type'] = 'application/json'
    return response

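Editorial note, not part of the diff: a quick standalone sketch of the behaviour the returns_json change adds. Strings are assumed to be pre-serialised JSON (for example values pulled straight from memcache) and are passed through untouched, so they are not encoded twice.

import json

def _to_json(content):
    # mirrors the new returns_json logic: only non-strings get dumped
    return content if isinstance(content, str) else json.dumps(content)

print(_to_json({"usage": {}}))    # dict -> '{"usage": {}}'
print(_to_json('{"usage": {}}'))  # already JSON -> passed through unchanged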
@@ -73,7 +81,7 @@ def validate_tenant_id(tenant_id, session):
    """Tenant ID validation that check that the id you passed is valid,
    and that a tenant with this ID exists.
    - returns tenant query, or a tuple if validation failure."""
    if isinstance(tenant_id, unicode):
    if isinstance(tenant_id, basestring):
        tenant_query = session.query(Tenant).\
            filter(Tenant.id == tenant_id)
        if tenant_query.count() == 0:
@@ -90,6 +98,23 @@ def require_admin(func, *args, **kwargs):
    if config.auth.get('authenticate_clients'):
        roles = flask.request.headers['X-Roles'].split(',')
        if 'admin' not in roles:
            return flask.make_response(403, "Must be admin")
            return flask.make_response("Must be admin", 403)

    return func(*args, **kwargs)


@decorator
def require_admin_or_owner(func, *args, **kwargs):
    if config.auth.get('authenticate_clients'):
        roles = flask.request.headers['X-Roles'].split(',')
        tenant_id = flask.request.headers['X-tenant-id']
        json_tenant_id = (None if not flask.request.json
                          else flask.request.json['tenant'])
        args_tenant_id = flask.request.args.get('tenant')
        request_tenant_id = json_tenant_id or args_tenant_id
        if 'admin' in roles or tenant_id == request_tenant_id:
            return func(*args, **kwargs)

        return flask.make_response("Must be admin or the tenant owner.", 403)

    return func(*args, **kwargs)
@@ -13,25 +13,32 @@
# under the License.

import flask
import hashlib
import re
from distil.NoPickle import NoPickle
from flask import Flask, Blueprint
from distil import database, config
from distil.constants import iso_time, iso_date, dawn_of_time
from distil.transformers import active_transformers as transformers
from distil.rates import RatesFile
from distil.models import SalesOrder, _Last_Run
from distil.models import _Last_Run
from distil.helpers import convert_to, reset_cache
from distil.interface import Interface, timed
from sqlalchemy import create_engine, func
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, create_session
from sqlalchemy.pool import NullPool
from sqlalchemy.exc import IntegrityError, OperationalError
# Fix the the multithread issue when using strptime, based on this link:
# stackoverflow.com/questions/2427240/thread-safe-equivalent-to-pythons-time-strptime # noqa
import _strptime
from datetime import datetime, timedelta
from decimal import Decimal
import json
import logging as log
from keystoneclient.middleware.auth_token import AuthProtocol as KeystoneMiddleware
from keystonemiddleware import auth_token

from .helpers import returns_json, json_must, validate_tenant_id, require_admin
from .helpers import require_admin_or_owner
from urlparse import urlparse


@@ -39,10 +46,18 @@ engine = None

Session = None

memcache = None

app = Blueprint("main", __name__)

DEFAULT_TIMEZONE = "Pacific/Auckland"

RATES = None

# Double confirm by:
# http://blog.namis.me/2012/02/14/python-strptime-is-not-thread-safe/
dumy_call = datetime.strptime("2011-04-05 18:40:58.525996",
                              "%Y-%m-%d %H:%M:%S.%f")

def get_app(conf):
    actual_app = Flask(__name__)
@@ -65,6 +80,8 @@ def get_app(conf):
                    format='%(asctime)s %(message)s')
    log.info("Billing API started.")

    setup_memcache()

    # if configured to authenticate clients, then wrap the
    # wsgi app in the keystone middleware.
    if config.auth.get('authenticate_clients'):
@@ -77,11 +94,22 @@ def get_app(conf):
            'auth_port': identity_url.port,
            'auth_protocol': identity_url.scheme
        }
        actual_app = KeystoneMiddleware(actual_app, conf)
        actual_app = auth_token.AuthProtocol(actual_app, conf)

    return actual_app


def setup_memcache():
    if config.memcache['enabled']:
        log.info("Memcache enabled.")
        import memcache as memcached
        global memcache
        memcache = memcached.Client(config.memcache['addresses'],
                                    pickler=NoPickle, unpickler=NoPickle)
    else:
        log.info("Memcache disabled.")


@app.route("last_collected", methods=["GET"])
@returns_json
@require_admin
@@ -116,8 +144,13 @@ def filter_and_group(usage, usage_by_resource):
        # billing.
        # if we have a list of trust sources configured, then
        # discard everything not matching.
        if trust_sources and u['source'] not in trust_sources:
            log.warning('ignoring untrusted usage sample ' +
        # NOTE(flwang): When posting samples by ceilometer REST API, it
        # will use the format <tenant_id>:<source_name_from_user>
        # so we need to use a regex to recognize it.
        if (trust_sources and
                all([not re.match(source, u['source'])
                     for source in trust_sources]) == True):
            log.warning('Ignoring untrusted usage sample ' +
                        'from source `%s`' % u['source'])
            continue

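Editorial note, not part of the diff: a sketch of the new trust check with made-up source names. Samples posted through the Ceilometer REST API arrive with source set to <tenant_id>:<source_name_from_user>, so each configured trust source is now treated as a regex rather than compared for equality.

import re

# hypothetical configuration and sample
trust_sources = ["openstack", r".+:billing_agent"]
sample_source = "3f8a9c1d:billing_agent"

# the sample is discarded only if no configured pattern matches it
untrusted = all(not re.match(pattern, sample_source) for pattern in trust_sources)
print(untrusted)  # False -> the sample is kept for billing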
@@ -127,7 +160,7 @@


def transform_and_insert(tenant, usage_by_resource, transformer, service,
                         meter_info, window_start, window_end,
                         mapping, window_start, window_end,
                         db, timestamp):
    with timed("apply transformer + insert"):
        for res, entries in usage_by_resource.items():
@@ -136,14 +169,14 @@ def transform_and_insert(tenant, usage_by_resource, transformer, service,
                service, entries, window_start, window_end)

            if transformed:
                res = meter_info.get('res_id_template', '%s') % res
                res = mapping.get('res_id_template', '%s') % res

                md_def = meter_info['metadata']
                md_def = mapping['metadata']

                db.insert_resource(tenant.id, res, meter_info['type'],
                db.insert_resource(tenant.id, res, mapping['type'],
                                   timestamp, entries[-1], md_def)
                db.insert_usage(tenant.id, res, transformed,
                                meter_info['unit'], window_start,
                                mapping['unit'], window_start,
                                window_end, timestamp)


@@ -175,21 +208,21 @@ def collect_usage(tenant, db, session, resp, end):

    mappings = config.collection['meter_mappings']

    for meter_name, meter_info in mappings.items():
        usage = tenant.usage(meter_name, window_start, window_end)
    for mapping in mappings:
        usage = tenant.usage(mapping['meter'], window_start, window_end)
        usage_by_resource = {}

        transformer = transformers[meter_info['transformer']]()
        transformer = transformers[mapping['transformer']]()

        filter_and_group(usage, usage_by_resource)

        if 'service' in meter_info:
            service = meter_info['service']
        if 'service' in mapping:
            service = mapping['service']
        else:
            service = meter_name
            service = mapping['meter']

        transform_and_insert(tenant, usage_by_resource,
                             transformer, service, meter_info,
                             transformer, service, mapping,
                             window_start, window_end, db,
                             timestamp)

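Editorial note, not part of the diff: the loop change above implies a config change. meter_mappings is now read as a list of mapping dicts, each naming its own meter, rather than a dict keyed by meter name. A hedged sketch of the shape the new code reads; the values are invented and only the keys used in the loop are shown.

meter_mappings = [
    {
        "meter": "instance",            # was the dict key before
        "transformer": "Uptime",        # looked up in active_transformers
        "service": "compute",           # optional; falls back to the meter name
        "type": "Virtual Machine",
        "unit": "second",
        "metadata": {},                 # md_def passed to insert_resource
        "res_id_template": "%s",        # optional
    },
]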
@@ -271,6 +304,7 @@ def run_usage_collection():
        trace = traceback.format_exc()
        log.critical('Exception escaped! %s \nTrace: \n%s' % (e, trace))


def make_serializable(obj):
    if isinstance(obj, list):
        return [make_serializable(x) for x in obj]
@@ -282,9 +316,10 @@ def make_serializable(obj):

    return obj


@app.route("get_usage", methods=["GET"])
@require_admin_or_owner
@returns_json
@require_admin
def get_usage():
    """
    Get raw aggregated usage for a tenant, in a given timespan.
@@ -320,32 +355,117 @@ def get_usage():

    log.info("parameter validation ok")

    if memcache is not None:
        key = make_key("raw_usage", tenant_id, start, end)

        data = memcache.get(key)
        if data is not None:
            log.info("Returning memcache raw data for %s in range: %s - %s" %
                     (tenant_id, start, end))
            return 200, data

    log.info("Calculating raw data for %s in range: %s - %s" %
             (tenant_id, start, end))

    # aggregate usage
    usage = db.usage(start, end, tenant_id)
    tenant_dict = build_tenant_dict(valid_tenant, usage, db)

    return 200, {'usage': make_serializable(tenant_dict)}
    response_json = json.dumps({'usage': make_serializable(tenant_dict)})

    if memcache is not None:
        memcache.set(key, response_json)

    return 200, response_json


@app.route("get_rated", methods=["GET"])
@require_admin_or_owner
@returns_json
def get_rated():
    """
    Get rated aggregated usage for a tenant, in a given timespan.
    Rates used are those at the 'start' of the timespan.
    -tenant_id: tenant to get data for.
    -start: a given start for the range.
    -end: a given end for the range, defaults to now.
    """
    tenant_id = flask.request.args.get('tenant', None)
    start = flask.request.args.get('start', None)
    end = flask.request.args.get('end', None)

    try:
        if start is not None:
            try:
                start = datetime.strptime(start, iso_date)
            except ValueError:
                start = datetime.strptime(start, iso_time)
        else:
            return 400, {"missing parameter": {"start": "start date" +
                                               " in format: y-m-d"}}
        if not end:
            end = datetime.utcnow()
        else:
            try:
                end = datetime.strptime(end, iso_date)
            except ValueError:
                end = datetime.strptime(end, iso_time)
    except ValueError:
        return 400, {
            "errors": ["'end' date given needs to be in format: " +
                       "y-m-d, or y-m-dTH:M:S"]}

    if end <= start:
        return 400, {"errors": ["end date must be greater than start."]}

    session = Session()

    valid_tenant = validate_tenant_id(tenant_id, session)
    if isinstance(valid_tenant, tuple):
        return valid_tenant

    if memcache is not None:
        key = make_key("rated_usage", valid_tenant.id, start, end)

        data = memcache.get(key)
        if data is not None:
            log.info("Returning memcache rated data for %s in range: %s - %s" %
                     (valid_tenant.id, start, end))
            return 200, data

    log.info("Calculating rated data for %s in range: %s - %s" %
             (valid_tenant.id, start, end))

    tenant_dict = calculate_rated_data(valid_tenant, start, end, session)

    response_json = json.dumps({'usage': tenant_dict})

    if memcache is not None:
        memcache.set(key, response_json)

    return 200, response_json


def make_key(api_call, tenant_id, start, end):
    call_info = [config.memcache['key_prefix'], api_call,
                 tenant_id, str(start), str(end)]
    return hashlib.sha256(str(call_info)).hexdigest()


def build_tenant_dict(tenant, entries, db):
    """Builds a dict structure for a given tenant."""
    tenant_dict = {'name': tenant.name, 'tenant_id': tenant.id,
                   'resources': {}}
    tenant_dict = {'name': tenant.name, 'tenant_id': tenant.id}

    all_resource_ids = {entry.resource_id for entry in entries}
    tenant_dict['resources'] = db.get_resources(all_resource_ids)

    for entry in entries:
        service = {'name': entry.service, 'volume': entry.volume,
                   'unit': entry.unit}
                   'unit': entry.unit}

        if (entry.resource_id not in tenant_dict['resources']):
            resource = db.get_resource_metadata(entry.resource_id)

            resource['services'] = [service]

            tenant_dict['resources'][entry.resource_id] = resource

        else:
            resource = tenant_dict['resources'][entry.resource_id]
            resource['services'].append(service)
        resource = tenant_dict['resources'][entry.resource_id]
        service_list = resource.setdefault('services', [])
        service_list.append(service)

    return tenant_dict
@@ -386,226 +506,26 @@ def add_costs_for_tenant(tenant, RatesManager):
    return tenant


def generate_sales_order(draft, tenant_id, end):
    """Generates a sales order dict, and unless draft is true,
    creates a database entry for sales_order."""
    session = Session()
def calculate_rated_data(tenant, start, end, session):
    """Calculate a rated data dict from the given range."""

    db = database.Database(session)

    valid_tenant = validate_tenant_id(tenant_id, session)
    if isinstance(valid_tenant, tuple):
        return valid_tenant
    global RATES
    if not RATES:
        RATES = RatesFile(config.rates_config)

    rates = RatesFile(config.rates_config)

    # Get the last sales order for this tenant, to establish
    # the proper ranging
    start = session.query(func.max(SalesOrder.end).label('end')).\
        filter(SalesOrder.tenant_id == tenant_id).first().end
    if not start:
        start = dawn_of_time

    # these coditionals need work, also some way to
    # ensure all given timedate values are in UTC?
    if end <= start:
        return 400, {"errors": ["end date must be greater than " +
                                "the end of the last sales order range."]}
    if end > datetime.utcnow():
        return 400, {"errors": ["end date cannot be a future date."]}

    usage = db.usage(start, end, tenant_id)

    session.begin()
    if not draft:
        order = SalesOrder(tenant_id=tenant_id, start=start, end=end)
        session.add(order)

    try:
        # Commit the record before we generate the bill, to mark this as a
        # billed region of data. Avoids race conditions by marking a tenant
        # BEFORE we start to generate the data for it.
        session.commit()

        # Transform the query result into a billable dict.
        tenant_dict = build_tenant_dict(valid_tenant, usage, db)
        tenant_dict = add_costs_for_tenant(tenant_dict, rates)

        # add sales order range:
        tenant_dict['start'] = str(start)
        tenant_dict['end'] = str(end)
        session.close()
        if not draft:
            log.info("Sales Order #%s Generated for %s in range: %s - %s" %
                     (order.id, tenant_id, start, end))
        return 200, tenant_dict
    except (IntegrityError, OperationalError):
        session.rollback()
        session.close()
        log.warning("IntegrityError creating sales-order for " +
                    "%s %s in range: %s - %s " %
                    (valid_tenant.name, valid_tenant.id, start, end))
        return 400, {"id": tenant_id,
                     "error": "IntegrityError, existing sales_order overlap."}


def regenerate_sales_order(tenant_id, target):
    """Finds a sales order entry nearest to the target,
    and returns a salesorder dict based on the entry."""
    session = Session()
    db = database.Database(session)
    rates = RatesFile(config.rates_config)

    valid_tenant = validate_tenant_id(tenant_id, session)
    if isinstance(valid_tenant, tuple):
        return valid_tenant

    try:
        sales_order = db.get_sales_orders(tenant_id, target, target)[0]
    except IndexError:
        return 400, {"errors": ["Given date not in existing sales orders."]}

    usage = db.usage(sales_order.start, sales_order.end, tenant_id)
    usage = db.usage(start, end, tenant.id)

    # Transform the query result into a billable dict.
    tenant_dict = build_tenant_dict(valid_tenant, usage, db)
    tenant_dict = add_costs_for_tenant(tenant_dict, rates)
    tenant_dict = build_tenant_dict(tenant, usage, db)
    tenant_dict = add_costs_for_tenant(tenant_dict, RATES)

    # add sales order range:
    tenant_dict['start'] = str(sales_order.start)
    tenant_dict['end'] = str(sales_order.end)
    tenant_dict['start'] = str(start)
    tenant_dict['end'] = str(end)

    return 200, tenant_dict


def regenerate_sales_order_range(tenant_id, start, end):
    """For all sales orders in a given range, generate sales order dicts,
    and return them."""
    session = Session()
    db = database.Database(session)
    rates = RatesFile(config.rates_config)

    valid_tenant = validate_tenant_id(tenant_id, session)
    if isinstance(valid_tenant, tuple):
        return valid_tenant

    sales_orders = db.get_sales_orders(tenant_id, start, end)

    tenants = []
    for sales_order in sales_orders:
        usage = db.usage(sales_order.start, sales_order.end, tenant_id)

        # Transform the query result into a billable dict.
        tenant_dict = build_tenant_dict(valid_tenant, usage, db)
        tenant_dict = add_costs_for_tenant(tenant_dict, rates)

        # add sales order range:
        tenant_dict['start'] = str(sales_order.start)
        tenant_dict['end'] = str(sales_order.end)

        tenants.append(tenant_dict)

    return 200, tenants


@app.route("sales_order", methods=["POST"])
@require_admin
@json_must()
@returns_json
def run_sales_order_generation():
    """Generates a sales order for the given tenant.
    -end: a given end date, or uses default"""
    tenant_id = flask.request.json.get("tenant", None)
    end = flask.request.json.get("end", None)
    if not end:
        # Today, the beginning of.
        end = datetime.utcnow().\
            replace(hour=0, minute=0, second=0, microsecond=0)
    else:
        try:
            end = datetime.strptime(end, iso_date)
        except ValueError:
            return 400, {"errors": ["'end' date given needs to be in format:" +
                                    " y-m-d"]}

    return generate_sales_order(False, tenant_id, end)


@app.route("sales_draft", methods=["POST"])
@require_admin
@json_must()
@returns_json
def run_sales_draft_generation():
    """Generates a sales draft for the given tenant.
    -end: a given end datetime, or uses default"""
    tenant_id = flask.request.json.get("tenant", None)
    end = flask.request.json.get("end", None)

    if not end:
        end = datetime.utcnow()
    else:
        try:
            end = datetime.strptime(end, iso_date)
        except ValueError:
            try:
                end = datetime.strptime(end, iso_time)
            except ValueError:
                return 400, {
                    "errors": ["'end' date given needs to be in format: " +
                               "y-m-d, or y-m-dTH:M:S"]}

    return generate_sales_order(True, tenant_id, end)


@app.route("sales_historic", methods=["POST"])
@require_admin
@json_must()
@returns_json
def run_sales_historic_generation():
    """Returns the sales order that intersects with the given target date.
    -target: a given target date"""
    tenant_id = flask.request.json.get("tenant", None)
    target = flask.request.json.get("date", None)

    if target is not None:
        try:
            target = datetime.strptime(target, iso_date)
        except ValueError:
            return 400, {"errors": ["date given needs to be in format: " +
                                    "y-m-d"]}
    else:
        return 400, {"missing parameter": {"date": "target date in format: " +
                                           "y-m-d"}}

    return regenerate_sales_order(tenant_id, target)


@app.route("sales_range", methods=["POST"])
@require_admin
@json_must()
@returns_json
def run_sales_historic_range_generation():
    """Returns the sales orders that intersect with the given date range.
    -start: a given start for the range.
    -end: a given end for the range, defaults to now."""
    tenant_id = flask.request.json.get("tenant", None)
    start = flask.request.json.get("start", None)
    end = flask.request.json.get("end", None)

    try:
        if start is not None:
            start = datetime.strptime(start, iso_date)
        else:
            return 400, {"missing parameter": {"start": "start date" +
                                               " in format: y-m-d"}}
        if end is not None:
            end = datetime.strptime(end, iso_date)
        else:
            end = datetime.utcnow()
    except ValueError:
        return 400, {"errors": ["dates given need to be in format: " +
                                "y-m-d"]}

    return regenerate_sales_order_range(tenant_id, start, end)
    return tenant_dict


if __name__ == '__main__':
@@ -15,6 +15,7 @@
# This is simply a namespace for global config storage
main = None
rates_config = None
memcache = None
auth = None
collection = None
transformers = None
@@ -25,6 +26,14 @@ def setup_config(conf):
    main = conf['main']
    global rates_config
    rates_config = conf['rates_config']

    # special case to avoid issues with older configs
    try:
        global memcache
        memcache = conf['memcache']
    except KeyError:
        memcache = {'enabled': False}

    global auth
    auth = conf['auth']
    global collection
@@ -32,7 +32,7 @@ iso_time = "%Y-%m-%dT%H:%M:%S"
iso_date = "%Y-%m-%d"
dawn_of_time = datetime(2014, 4, 1)

# VM states:
# VM states (SOON TO BE REMOVED):
states = {'active': 1,
          'building': 2,
          'paused': 3,
@@ -111,11 +111,11 @@ class Database(object):

        return query

    def get_resource_metadata(self, resource_id):
        """Gets the metadata for a resource and loads it into a dict."""
        info = self.session.query(Resource.info).\
            filter(Resource.id == resource_id)
        return json.loads(info[0].info)
    def get_resources(self, resource_id_list):
        """Gets resource metadata in bulk."""
        query = self.session.query(Resource.id, Resource.info).\
            filter(Resource.id.in_(resource_id_list))
        return {row.id: json.loads(row.info) for row in query}

    def get_sales_orders(self, tenant_id, start, end):
        """Returns a query with all sales orders
@@ -12,8 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.

from novaclient.v1_1 import client
from cinderclient.v1 import client as cinderclient
from novaclient import client as novaclient
from cinderclient.v2 import client as cinderclient
from decimal import Decimal
import config
import math
@@ -30,12 +30,15 @@ def reset_cache():

def flavor_name(f_id):
    """Grabs the correct flavor name from Nova given the correct ID."""
    _client_class = novaclient.get_client_class(2)

    if f_id not in cache['flavors']:
        nova = client.Client(
        nova = _client_class(
            config.auth['username'],
            config.auth['password'],
            config.auth['default_tenant'],
            config.auth['end_point'],
            region_name=config.main['region'],
            insecure=config.auth['insecure'])

        cache['flavors'][f_id] = nova.flavors.get(f_id).name
@@ -49,6 +52,7 @@ def volume_type(volume_type):
            config.auth['password'],
            config.auth['default_tenant'],
            config.auth['end_point'],
            region=config.main['region'],
            insecure=config.auth['insecure'])

    for vtype in cinder.volume_types.list():
distil/tests/__init__.py (new empty file)
distil/tests/unit/__init__.py (new empty file)
Some files were not shown because too many files have changed in this diff.