Add tox configuration

Change-Id: Ic4051617b5757c649cd59dacf8e9dd9d7e9819a6
Signed-off-by: Julien Danjou <julien.danjou@enovance.com>
This commit is contained in:
Doug Hellmann 2012-05-29 17:57:58 -04:00 committed by Julien Danjou
parent a49e59b115
commit 5e0a32f475
11 changed files with 107 additions and 28 deletions

1
.gitignore vendored
View File

@@ -4,3 +4,4 @@ TAGS
*.egg-info *.egg-info
build build
.coverage .coverage
.tox

View File

@@ -32,17 +32,18 @@ if __name__ == '__main__':
flags.FLAGS(sys.argv) flags.FLAGS(sys.argv)
logging.setup() logging.setup()
utils.monkey_patch() utils.monkey_patch()
server = service.Service.create(binary='ceilometer-agent', server = \
topic='ceilometer.agent', service.Service.create(binary='ceilometer-agent',
manager='ceilometer.agent.manager.AgentManager', topic='ceilometer.agent',
# FIXME(dhellmann): The manager='ceilometer.agent.manager.AgentManager',
# periodic_interval is set very # FIXME(dhellmann): The
# short for development. After we # periodic_interval is set very
# fix the configuration loading we # short for development. After we
# can use the config file to # fix the configuration loading we
# adjust it and remove this # can use the config file to
# hard-coded value. # adjust it and remove this
periodic_interval=10, # hard-coded value.
) periodic_interval=10,
)
service.serve(server) service.serve(server)
service.wait() service.wait()

View File

@@ -32,8 +32,10 @@ if __name__ == '__main__':
flags.FLAGS(sys.argv) flags.FLAGS(sys.argv)
logging.setup() logging.setup()
utils.monkey_patch() utils.monkey_patch()
server = service.Service.create(binary='ceilometer-collector', server = \
topic='ceilometer.collector', service.Service.create(binary='ceilometer-collector',
manager='ceilometer.collector.manager.CollectorManager') topic='ceilometer.collector',
manager='ceilometer.collector.'
'manager.CollectorManager')
service.serve(server) service.serve(server)
service.wait() service.wait()

View File

@@ -78,14 +78,17 @@ class DiskIOPollster(plugin.PollsterBase):
try: try:
disks = self._get_disks(conn, instance.name) disks = self._get_disks(conn, instance.name)
except Exception as err: except Exception as err:
self.LOG.warning('Ignoring instance %s: %s', instance.name, err) self.LOG.warning('Ignoring instance %s: %s', \
instance.name, err)
self.LOG.exception(err) self.LOG.exception(err)
continue continue
bytes = 0 bytes = 0
for disk in disks: for disk in disks:
stats = conn.block_stats(instance.name, disk) stats = conn.block_stats(instance.name, disk)
self.LOG.info("DISKIO USAGE: %s %s: read-requests=%d read-bytes=%d write-requests=%d write-bytes=%d errors=%d", self.LOG.info("DISKIO USAGE: %s %s:"
instance, disk, stats[0], stats[1], stats[2], stats[3], stats[4]) "read-requests=%d read-bytes=%d write-requests=%d write-bytes=%d errors=%d",
instance, disk, stats[0], stats[1],
stats[2], stats[3], stats[4])
bytes += stats[1] + stats[3] # combine read and write bytes += stats[1] + stats[3] # combine read and write
yield make_counter_from_instance(instance, yield make_counter_from_instance(instance,
type='disk', type='disk',
@@ -101,11 +104,13 @@ class CPUPollster(plugin.PollsterBase):
conn = nova.virt.connection.get_connection(read_only=True) conn = nova.virt.connection.get_connection(read_only=True)
# FIXME(dhellmann): How do we get a list of instances without # FIXME(dhellmann): How do we get a list of instances without
# talking directly to the database? # talking directly to the database?
for instance in manager.db.instance_get_all_by_host(context, manager.host): for instance in manager.db.instance_get_all_by_host(context,
manager.host):
self.LOG.info('checking instance %s', instance.uuid) self.LOG.info('checking instance %s', instance.uuid)
try: try:
cpu_info = conn.get_info(instance) cpu_info = conn.get_info(instance)
self.LOG.info("CPUTIME USAGE: %s %d", instance, cpu_info['cpu_time']) self.LOG.info("CPUTIME USAGE: %s %d",
instance, cpu_info['cpu_time'])
yield make_counter_from_instance(instance, yield make_counter_from_instance(instance,
type='cpu', type='cpu',
volume=cpu_info['cpu_time'], volume=cpu_info['cpu_time'],

View File

@@ -43,8 +43,10 @@ class FloatingIPPollster(plugin.PollsterBase):
resource_id=ip.id, resource_id=ip.id,
datetime=None, datetime=None,
duration=None, duration=None,
resource_metadata={'address': ip.address, resource_metadata={
'fixed_ip_id': ip.fixed_ip_id, 'address': ip.address,
'host': ip.host, 'fixed_ip_id': ip.fixed_ip_id,
'pool': ip.pool, 'host': ip.host,
'auto_assigned': ip.auto_assigned}) 'pool': ip.pool,
'auto_assigned': ip.auto_assigned
})

View File

@@ -43,4 +43,5 @@ class PollsterBase(object):
@abc.abstractmethod @abc.abstractmethod
def get_counters(self, manager, context): def get_counters(self, manager, context):
"""Return a sequence of Counter instances from polling the resources.""" """Return a sequence of Counter instances from polling the
resources."""

7
setup.cfg Normal file
View File

@@ -0,0 +1,7 @@
[nosetests]
cover-package = ceilometer
cover-html = true
cover-erase = true
cover-inclusive = true
verbosity=2
detailed-errors=1

View File

@@ -33,12 +33,14 @@ setuptools.setup(
py_modules=[], py_modules=[],
entry_points={ entry_points={
'ceilometer.collector.compute': [ 'ceilometer.collector.compute': [
'instance_create = ceilometer.compute.notifications:InstanceCreate', 'instance_create'
'= ceilometer.compute.notifications:InstanceCreate',
], ],
'ceilometer.poll.compute': [ 'ceilometer.poll.compute': [
'libvirt_diskio = ceilometer.compute.libvirt:DiskIOPollster', 'libvirt_diskio = ceilometer.compute.libvirt:DiskIOPollster',
'libvirt_cpu = ceilometer.compute.libvirt:CPUPollster', 'libvirt_cpu = ceilometer.compute.libvirt:CPUPollster',
'network_floatingip = ceilometer.compute.network:FloatingIPPollster', 'network_floatingip'
'= ceilometer.compute.network:FloatingIPPollster',
], ],
}, },
) )

10
tools/pip-requires Normal file
View File

@@ -0,0 +1,10 @@
https://github.com/openstack/openstack-common/zipball/master#egg=openstack.common
https://github.com/openstack/nova/zipball/master#egg=nova
webob
kombu
iso8601
lockfile
netaddr
argparse
sqlalchemy
anyjson==0.3.1

6
tools/test-requires Normal file
View File

@@ -0,0 +1,6 @@
nose
openstack.nose_plugin
coverage
pep8>=1.0
eventlet
mox

42
tox.ini Normal file
View File

@@ -0,0 +1,42 @@
[tox]
envlist = py26,py27,pep8
[testenv]
setenv = VIRTUAL_ENV={envdir}
NOSE_WITH_OPENSTACK=1
NOSE_OPENSTACK_COLOR=1
NOSE_OPENSTACK_RED=0.05
NOSE_OPENSTACK_YELLOW=0.025
NOSE_OPENSTACK_SHOW_ELAPSED=1
deps = -r{toxinidir}/tools/pip-requires
-r{toxinidir}/tools/test-requires
commands = nosetests
[testenv:pep8]
deps = pep8
commands = pep8 --repeat --show-source ceilometer setup.py bin/ceilometer-agent bin/ceilometer-collector
[testenv:venv]
commands = {posargs}
[testenv:cover]
commands = nosetests --cover-erase --cover-package=ceilometer --with-xcoverage
[tox:jenkins]
downloadcache = ~/cache/pip
[testenv:jenkins26]
basepython = python2.6
setenv = NOSE_WITH_XUNIT=1
[testenv:jenkins27]
basepython = python2.7
setenv = NOSE_WITH_XUNIT=1
[testenv:jenkinscover]
setenv = NOSE_WITH_XUNIT=1
commands = nosetests --cover-erase --cover-package=ceilometer --with-xcoverage
[testenv:jenkinsvenv]
setenv = NOSE_WITH_XUNIT=1
commands = {posargs}