Initial commit; initial layout of code and structure, SQLAlchemy hookups, and so on. Covers the basics of communicating with an underlying database to handle billing artifacts.
Covers the initial API that clients will use to interface with a given Tenant's bills.
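As a rough sketch of the intended client flow (hedged; the calls mirror the code added below, the tenant name is a placeholder, and pieces such as invoicing are still stubs in this commit):

import datetime

from artifice import artifice

# connect() reads /etc/artifice/config.yaml unless a config dict is passed in.
billing = artifice.connect()

start = datetime.datetime(2013, 7, 1)
end = datetime.datetime(2013, 7, 2)

tenant = billing.tenant("demo")        # wraps the Keystone tenant record
usage = tenant.contents(start, end)    # per-datacenter usage for the range
usage.save()                           # persist the usage artifacts to the DB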
commit a26d9f36f5
artifice/__init__.py (new file, 0 lines)
artifice/artifice.py (new file, 26 lines)
@@ -0,0 +1,26 @@
import yaml
from models import session
from interface import Artifice

default_config = "/etc/artifice/config.yaml"


def connect(config=None):

    if config is None:
        try:
            fh = open(default_config)
        except IOError:
            print "Can't open default config!"
            raise
        config = yaml.load(fh.read())

    # conn_string = 'postgresql://%(username)s:%(password)s@%(host)s:%(port)s/%(database)s' % {
    #     "username": config["database"]["username"],
    #     "password": config["database"]["password"],
    #     "host": config["database"]["host"],
    #     "port": config["database"]["port"],
    #     "database": config["database"]["database"]
    # }
    # engine = create_engine(conn_string)
    # session.configure(bind=engine)

    artifice = Artifice(config)
    # artifice.artifice = session
    return artifice
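For reference, connect() and interface.Artifice read the following keys out of the YAML config. This is only a sketch inferred from the code in this commit; the values are placeholders.

# The dict below is what yaml.load() is expected to produce from
# /etc/artifice/config.yaml; only these keys are read by this commit.
example_config = {
    "openstack": {
        "username": "admin",
        "password": "secret",
        "default_tenant": "demo",
        "authenticator": "http://localhost:35357/v2.0",
    },
    "database": {
        "username": "artifice",
        "password": "secret",
        "host": "localhost",
        "port": 5432,
        "database": "artifice",
    },
    "ceilometer": {
        "host": "http://localhost:8777/",
    },
}

# artifice = connect(example_config)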
artifice/ceilometer.py (new file, 0 lines)
artifice/interface.py (new file, 466 lines)
@@ -0,0 +1,466 @@
# Interfaces to the Ceilometer API
import ceilometer

# Brings in HTTP support
import requests
import json

#
import os
import urllib
import datetime

from copy import copy
from collections import defaultdict

# Provides authentication against Openstack
from keystoneclient.v2_0 import client

from sqlalchemy import create_engine

#
# from .models import usage
from .models import session, usage

# Date format Ceilometer uses
# 2013-07-03T13:34:17
# which is, as an strftime:
# timestamp = datetime.strptime(res["timestamp"], "%Y-%m-%dT%H:%M:%S.%f")
# or
# timestamp = datetime.strptime(res["timestamp"], "%Y-%m-%dT%H:%M:%S")

# Most of the time we use date_format
date_format = "%Y-%m-%dT%H:%M:%S"
# Sometimes things also have milliseconds, so we look for that too.
other_date_format = "%Y-%m-%dT%H:%M:%S.%f"


class NotFound(BaseException): pass


class Artifice(object):
    """It's an artificer for making artifacts of billing!"""

    def __init__(self, config):
        super(Artifice, self).__init__()
        self.config = config

        # This is the Keystone client connection, which provides our
        # OpenStack authentication
        self.auth = client.Client(
            username=config["openstack"]["username"],
            password=config["openstack"]["password"],
            tenant_name=config["openstack"]["default_tenant"],
            auth_url=config["openstack"]["authenticator"]
            # auth_url="http://localhost:35357/v2.0"
        )
        conn_string = 'postgresql://%(username)s:%(password)s@%(host)s:%(port)s/%(database)s' % {
            "username": config["database"]["username"],
            "password": config["database"]["password"],
            "host": config["database"]["host"],
            "port": config["database"]["port"],
            "database": config["database"]["database"]
        }
        engine = create_engine(conn_string)
        session.configure(bind=engine)
        self.artifice = None
        self._tenancy = None
        self._changes = []

    def data_for(self, tenant=None, start=None, end=None, sections=None):
        # This is turning into a giant function blob of goo, which is ungood.

        if tenant is None:
            raise KeyError("Missing tenant!")
        if end is None:
            end = datetime.datetime.now() - datetime.timedelta(days=1)

        tenant = self.tenant(tenant)

        # Okay, we've got some usefulness we can do now.
        # Tenant is expected to be a text string, not the internal ID. So, we need to convert it.
        resourcing_fields = [{"field": "project_id", "op": "eq", "value": tenant.id}]
        data_fields = []
        if start is not None:
            data_fields.append({
                "field": "timestamp", "op": "ge", "value": start.strftime(date_format)
            })

        data_fields.append({
            "field": "timestamp", "op": "le", "value": end.strftime(date_format)
        })
        r = requests.get(
            os.path.join(self.config["ceilometer"]["host"], "v2/resources"),
            headers={"X-Auth-Token": self.auth.auth_token,
                     "Content-Type": "application/json"},
            data=json.dumps({"q": resourcing_fields})
        )
        resources = json.loads(r.text)
        for resource in resources:
            for link in resource["links"]:
                if link["rel"] == "self":
                    continue
                # Currently dislike this layout. Will fix.
                if sections and link['rel'] not in sections:
                    continue

                resp = requests.get(
                    link["href"],
                    headers={"X-Auth-Token": self.auth.auth_token,
                             "Content-Type": "application/json"},
                    data=json.dumps({
                        "q": data_fields
                    })
                )

                meters = json.loads(resp.text)
                counter_types = set([meter["counter_type"] for meter in meters])

                if len(counter_types) > 1:
                    # Hmm.
                    pass

                try:
                    func = getattr(self, list(counter_types)[0])
                    if not callable(func):
                        # oops
                        pass
                    func()
                except AttributeError:
                    # Oops!
                    pass

                artifice = tenant[link["rel"]].add(usage)
                artifice.save()
                self._changes.append(artifice)  # Hmm.

    def tenant(self, name):
        """
        Returns a Tenant object describing the specified Tenant by name,
        or raises a NotFound error.
        """
        # Returns a Tenant object for the given name.
        # This is irritatingly inefficient
        url = "%(url)s/tenants?%(query)s" % {
            "url": self.config["openstack"]["authenticator"],
            "query": urllib.urlencode({"name": name})
        }
        r = requests.get(url, headers={"X-Auth-Token": self.auth.auth_token,
                                       "Content-Type": "application/json"})
        if r.ok:
            datar = json.loads(r.text)
            t = Tenant(datar["tenant"], self)

            return t
        else:
            if r.status_code == 404:
                # couldn't find it
                raise NotFound

    @property
    def tenants(self):
        """All the tenants in our system"""
        if not self._tenancy:
            self._tenancy = dict([(t.name, Tenant(t, self)) for t in self.auth.tenants.list()])
        return self._tenancy

    @property
    def changes(self):
        return self._changes


class Tenant(object):

    def __init__(self, tenant, conn=None):
        self.tenant = tenant
        # Conn is the Artifice object we were instanced from
        self.conn = conn
        self._meters = set()
        self._resources = None

    def __getattr__(self, attr):
        if attr not in self.tenant:
            raise AttributeError(attr)
        return self.tenant[attr]

    @property
    def resources(self):
        if not self._resources:
            resourcing_fields = [{"field": "project_id", "op": "eq", "value": self.id}]
            r = requests.get(
                os.path.join(self.conn.config["ceilometer"]["host"], "v2/resources"),
                headers={"X-Auth-Token": self.conn.auth.auth_token,
                         "Content-Type": "application/json"},
                data=json.dumps({"q": resourcing_fields})
            )
            if not r.ok:
                return None

            self._resources = json.loads(r.text)
        return self._resources

    def section(self, section):
        """returns an object-sort of thing to represent a section: VM or
        network or whatever"""
        return

    # def usage(self, start, end, section=None):
    def contents(self, start, end):
        # Returns a usage dict, based on regions.
        vms = {}
        vm_to_region = {}
        ports = {}

        usage_by_dc = {}

        date_fields = [{
            "field": "timestamp",
            "op": "ge",
            "value": start.strftime(date_format)
            },
            {
            "field": "timestamp",
            "op": "lt",
            "value": end.strftime(date_format)
            },
        ]
        writing_to = None

        vms = []
        networks = []
        storage = []
        images = []

        for resource in self.resources:
            rels = [link["rel"] for link in resource["links"] if link["rel"] != 'self']
            if "image" in rels:
                # Images don't have location data - we don't know where they are.
                # It may not matter where they are.
                resource["_type"] = "image"
                images.append(resource)
            elif "storage" in rels:
                # Unknown how this data layout happens yet.
                resource["_type"] = "storage"
                storage.append(resource)
            elif "network" in rels:
                # Have we seen the VM that owns this yet?
                resource["_type"] = "network"
                networks.append(resource)
            else:
                resource["_type"] = "vm"
                vms.append(resource)

        datacenters = {}
        region_tmpl = {"vms": [],
                       "network": [],
                       "storage": []
                       }
        vm_to_region = {}
        for vm in vms:
            id_ = vm["resource_id"]

            datacenter = self.host_to_dc(vm["metadata"]["host"])

            if datacenter not in datacenters:
                dict_ = copy(region_tmpl)
                datacenters[datacenter] = dict_

            datacenters[datacenter]["vms"].append(vm)

            vm_to_region[id_] = datacenter

        for network in networks:
            vm_id = network["metadata"]["instance_id"]
            datacenter = vm_to_region[vm_id]

            datacenters[datacenter]["network"].append(network)

        # for resource in storage:
        #     pass

        # for image in images:
        #     pass
        #     # These can be billed as internal transfer, or block storage. TBD.

        # Now, we have everything arranged by region.
        # As we've not queried for individual meters as yet, this represents
        # only the breakdown of resources that exist in the various datacenter/region
        # constructs.
        # So we can now start to collect stats and construct what we consider to be
        # usage information for this tenant for this timerange.

        return Contents(datacenters, start, end)

    @property
    def meters(self):
        if not self._meters:
            resourcing_fields = [{"field": "project_id", "op": "eq", "value": self.id}]
            r = requests.get(
                os.path.join(self.conn.config["ceilometer"]["host"], "v2/resources"),
                headers={"X-Auth-Token": self.conn.auth.auth_token,
                         "Content-Type": "application/json"},
                data=json.dumps({"q": resourcing_fields})
            )
            # meters = set()
            resources = json.loads(r.text)
            for resource in resources:
                for link in resource["links"]:
                    if link["rel"] == "self":
                        continue
                    self._meters.add(link["rel"])
                    # sections.append(Section(self, link))
        return self._meters


class Contents(object):

    def __init__(self, contents, start, end):
        self.contents = contents
        self.start = start
        self.end = end

        # Replaces all the internal references with better references to
        # actual metered values.
        self._replace()

    def __getitem__(self, item):
        return self.contents[item]

    def __iter__(self):
        # Iterate over the datacenter keys.
        return iter(self.contents)

    def _replace(self):
        # Turns individual metering objects into
        # Usage objects that this expects.
        for dc in self.contents.iterkeys():
            for section in self.contents[dc].iterkeys():
                meters = []
                for meter in self.contents[dc][section]:

                    usage = meter.usage(self.start, self.end)
                    usage.db = self.db  # catch the DB context?

                    meters.append(usage)
                # Overwrite the original metering objects
                # with the core usage objects.
                # This is because we're not storing metering.
                self.contents[dc][section] = meters

    def save(self):
        """
        Iterate the list of things; save them to DB.
        """
        self.db.begin()
        for dc in self.contents.iterkeys():
            for section in self.contents[dc].iterkeys():
                for meter in self.contents[dc][section]:
                    meter.save()
        self.db.commit()


class Resource(object):

    def __init__(self, resource, conn=None):
        self.resource = resource
        # conn is the Artifice connection, used for auth when querying meters.
        self.conn = conn

    @property
    def meters(self):
        meters = []
        for link in self.resource["links"]:
            if link["rel"] == "self":
                continue
            meter = Meter(self.resource, link, self.conn)
            meters.append(meter)
        return meters


class Meter(object):

    def __init__(self, resource, link, conn=None):
        self.resource = resource
        self.link = link
        self.conn = conn
        # self.meter = meter

    def __getitem__(self, x):
        if isinstance(x, slice):
            # Woo
            pass
        pass

    def usage(self, start, end):
        """
        Usage condenses the entirety of a meter into a single datapoint:
        A volume value that we can plot as a single number against previous
        usage for a given range.
        """
        date_fields = [{
            "field": "timestamp",
            "op": "ge",
            "value": start.strftime(date_format)
            },
            {
            "field": "timestamp",
            "op": "lt",
            "value": end.strftime(date_format)
            }
        ]
        r = requests.get(
            self.link["href"],
            headers={
                "X-Auth-Token": self.conn.auth.auth_token,
                "Content-Type": "application/json"},
            data=json.dumps({"q": date_fields})
        )
        measurements = json.loads(r.text)
        self.measurements = defaultdict(list)

        # A meter is expected to carry a single counter type.
        self.type = set([a["counter_type"] for a in measurements]).pop()
        type_ = None
        if self.type == "cumulative":
            # The usage is the last one, which is the highest value.
            #
            # Base it just on the resource ID.

            # Is this a reasonable thing to do?
            # Composition style: resource.meter("cpu_util").usage(start, end) == artifact
            type_ = Cumulative

        elif self.type == "gauge":
            type_ = Gauge
            # return Gauge(self.Resource, )
        elif self.type == "delta":
            type_ = Delta

        return type_(self.resource, measurements, start, end)


class Artifact(object):

    def __init__(self, resource, usage, start, end):

        self.resource = resource
        self.usage = usage
        self.start = start
        self.end = end

    def __getitem__(self, item):
        if item in self._data:
            return self._data[item]
        raise KeyError("no such item %s" % item)

    def save(self):
        """
        Persists to our database backend. Opinionatedly this is a sql datastore.
        """
        value = self.volume()
        # self.artifice.
        self.db.save(self.resource.id, value, self.start, self.end)

    def volume(self):
        """
        Default billable number for this volume
        """
        return self.usage[-1]["counter_volume"]


class Cumulative(Artifact):

    def volume(self):
        measurements = self.usage
        measurements = sorted(measurements, key=lambda x: x["timestamp"])
        total_usage = measurements[-1]["counter_volume"] - measurements[0]["counter_volume"]
        return total_usage


class Gauge(Artifact):

    # def volume(self):
    #     pass
    pass


class Delta(Artifact):

    pass
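The Resource/Meter/Artifact classes above are meant to compose roughly the way the inline comment suggests (resource to meter to usage artifact). A hedged sketch of that flow, assuming an Artifice connection and a Tenant as in the earlier example:

# Sketch only: walk a tenant's raw Ceilometer resources, wrap each one,
# and condense every meter into a single Artifact for the time range.
for raw in tenant.resources:
    resource = Resource(raw, conn=billing)
    for meter in resource.meters:
        artifact = meter.usage(start, end)   # Cumulative, Gauge or Delta
        print("%s: %s" % (meter.link["rel"], artifact.volume()))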
artifice/invoice.py (new file, 50 lines)
@@ -0,0 +1,50 @@
# In the real world, costs are expected to be pulled from OpenERP
# As this is kind of an alpha piece of code, costs are pulled from
# RIGHT HERE.
costs = {
    "cpu_util": {"local": "1"}
}


class Invoice(object):

    def __init__(self, tenant):
        self.tenant = tenant

    def bill(self, usage):
        """
        Expects a list of dicts of datacenters.
        Each DC is expected to have a list of Types: VM, Network, Storage.
        Each Type is expected to have a list of Meters.
        Each Meter is expected to have a Usage method that takes our start
        and end values.
        Each Meter will be entered as a line on the Invoice.
        """

        for dc in usage:
            # DC is the name of the DC/region. Or the internal code. W/E.
            # print datacenter
            self.subheading(dc["name"])
            for section in dc["sections"]:  # will be vm, network, storage
                self.subheading(section)

                meters = dc["sections"][section]

                for meter in meters:
                    cost = self.cost(dc["name"], meter["name"])

                    self.line("%s per unit " % cost, meter.volume, cost * meter.volume)
        self.commit()  # Writes to OpenERP? Closes the invoice? Something.

    def commit(self):
        pass

    def close(self):
        """
        Makes this invoice no longer writable - it's closed and registered as
        a closed invoice in OpenERP; sent out for payment, etc.
        """
        pass

    def cost(self, datacenter, meter):
        """Returns the cost of a given resource in a given datacenter."""
        return costs[meter][datacenter]
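Invoice.bill() indexes its argument as a list of datacenter dicts, each with a "name" and a "sections" mapping. A hedged sketch of that shape (the empty lists stand in for the meter/usage objects produced by interface.py):

# Shape only; real entries are usage objects carrying a "name" key and
# a volume, since bill() reads meter["name"] and meter.volume.
usage = [
    {
        "name": "datacenter-1",
        "sections": {
            "vm": [],
            "network": [],
            "storage": [],
        },
    },
]

# invoice = Invoice(tenant)
# invoice.bill(usage)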
artifice/models/__init__.py (new file, 8 lines)
@@ -0,0 +1,8 @@
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

Base = declarative_base()
#

session = sessionmaker()
artifice/models/resources.py (new file, 11 lines)
@@ -0,0 +1,11 @@
from . import Base
from sqlalchemy import Column, ForeignKey, String, types
from sqlalchemy.orm import relationship, backref


class Resource(Base):

    __tablename__ = "resources"

    id = Column(String, primary_key=True)
    type_ = Column(String, primary_key=True)
    tenant_id = Column(String, ForeignKey("tenants.id"))
    tenant = relationship("Tenant", backref=backref("resources", order_by=id))
artifice/models/tenants.py (new file, 10 lines)
@@ -0,0 +1,10 @@
from . import Base
from sqlalchemy import Column, String, types
from sqlalchemy.schema import CheckConstraint


class Tenant(Base):

    __tablename__ = 'tenants'
    # ID is a uuid
    id = Column(String, primary_key=True, nullable=False)
    # Some reference data to something else?
artifice/models/usage.py (new file, 40 lines)
@@ -0,0 +1,40 @@
from . import Base
from .resources import Resource
from sqlalchemy import Column, ForeignKey, String, types
from sqlalchemy.orm import relationship, backref
from sqlalchemy.schema import CheckConstraint
import datetime

from sqlalchemy.dialects.postgresql import ExcludeConstraint, TSRANGE


class Usage(Base):

    __tablename__ = 'usage'

    resource_id = Column(String, ForeignKey("resources.id"))
    # tenant = Column(String, nullable=False)
    tenant_id = Column(String, ForeignKey("tenants.id"))
    volume = Column(String, nullable=False)
    time = Column(TSRANGE, nullable=False)
    start = Column(types.DateTime, nullable=False)
    end = Column(types.DateTime, nullable=False)
    created = Column(types.DateTime, nullable=False)

    resource = relationship("Resource", backref=backref("usage", order_by=created))
    tenant = relationship("Tenant", backref=backref("usage", order_by=created))

    __table_args__ = (
        ExcludeConstraint(
            ('tenant_id', '='),
            ('resource_id', '='),
            ('time', '&&')
        ),
        CheckConstraint("start < end"),
    )

    def __init__(self, resource, tenant, start, end):

        assert start < end

        self.resource = resource
        self.tenant = tenant
        self.start = start
        self.end = end
        self.time = [start, end]
        self.created = datetime.datetime.now()
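The ExcludeConstraint above is what keeps two usage rows for the same tenant and resource from covering overlapping time ranges (on the PostgreSQL side this kind of constraint typically needs the btree_gist extension). A hedged sketch of the behaviour it is meant to enforce, with placeholder resource, tenant, and session objects:

import datetime

day1 = datetime.datetime(2013, 7, 1)
day2 = datetime.datetime(2013, 7, 2)
day3 = datetime.datetime(2013, 7, 3)

first = Usage(resource, tenant, day1, day3)
second = Usage(resource, tenant, day2, day3)   # overlaps "first"

# db = session()
# db.add_all([first, second])
# db.commit()   # expected to fail with an IntegrityError from the constraint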
bin/bill.py (new file, 96 lines)
@@ -0,0 +1,96 @@
from artifice import interface
import datetime
import yaml

date_format = "%Y-%m-%dT%H:%M:%S"
other_date_format = "%Y-%m-%dT%H:%M:%S.%f"

date_fmt_fnc = lambda x: datetime.datetime.strptime(x, date_format)


if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    # Takes names to display.
    # none means display them all.
    parser.add_argument("-t", "--tenant", dest="tenants", help='Tenant to display',
                        action="append", default=[])

    # Add some sections to show data from.
    # Empty is display all
    parser.add_argument("-s", "--section", dest="sections", help="Sections to display",
                        action="append")

    # Ranging
    # We want to get stuff from, to.

    parser.add_argument("--from", dest="start",
                        help="When to start our range, date format %s" % date_format,
                        type=date_fmt_fnc)
    parser.add_argument("--to", dest="end",
                        help="When to end our date range. Defaults to yesterday.",
                        type=date_fmt_fnc,
                        default=datetime.datetime.now() - datetime.timedelta(days=1))

    parser.add_argument("--config", dest="config", help="Config file",
                        default="/etc/artifice/config.yaml")

    args = parser.parse_args()

    conf = yaml.load(open(args.config).read())

    # Make ourselves a nice interaction object
    n = interface.Artifice(conf)
    tenants = args.tenants
    if not args.tenants:
        # only parse this list of tenants
        tenants = n.tenants

    for tenant_name in tenants:
        # artifact = n.tenant(tenant_name).section(section).usage(args.start, args.end)
        # data should now be an artifact-like construct.
        # Data also knows about Samples, where as an Artifact doesn't.

        # An artifact knows its section
        tenant = n.tenant(tenant_name)
        # Makes a new invoice up for this tenant.
        invoice = tenant.invoice(start=args.start, end=args.end)
        print "Tenant: %s" % tenant.name
        print "Range: %s -> %s" % (args.start, args.end)

        # usage = tenant.usage(args.start, args.end)
        usage = tenant.contents(args.start, args.end)
        # A Usage set is the entirety of time for this Tenant.
        # It's not time-limited at all.
        # But the
        usage.save()
        invoice.bill(usage)
        invoice.close()

        for datacenter, sections in usage.iteritems():
            # DC is the name of the DC/region. Or the internal code. W/E.
            print datacenter

            for section_name in args.sections:
                assert section_name in sections

                # section = sections[ section_name ]
                print sections[section_name]
                for resources in sections[section_name]:
                    for resource in resources:
                        print resource
                        for meter in resource.meters:
                            meter_usage = meter.usage(args.start, args.end)
                            if meter_usage.has_been_saved():
                                continue
                            print meter_usage.volume()
                            print meter_usage.cost()
                            meter_usage.save()
                            # Finally, bill it.
                            # All of these things need to be converted to the
                            # publicly-viewable version now.
                            invoice.bill(datacenter, resource, meter, meter_usage)

    # Section is going to be in the set of vm, network, storage, image
    # # or just all of them.
    # # It's not going to be an individual meter name.
    # artifacts = section.usage(args.start, args.end)
    # for artifact in artifacts:
    #     if artifact.has_been_saved:
    #         # Does this artifact exist in the DB?
    #         continue
    #     artifact.save()  # Save to the Artifact storage
    #     # Saves to the invoice.
    #     invoice.bill ( artifact )
    #     # artifact.bill( invoice.id )
    #     print "%s: %s" % (section.name, artifact.volume)
bin/usage.py (new file, 0 lines)
sql/init.sql (new file, 0 lines)