Merge "Update IronicHostManager tests to better match how code works"

This commit is contained in:
Jenkins 2014-06-18 11:11:19 +00:00 committed by Gerrit Code Review
commit 6fec946cfd
2 changed files with 26 additions and 74 deletions

View File

@@ -16,6 +16,8 @@
Fake nodes for Ironic host manager tests.
"""
from nova.openstack.common import jsonutils
COMPUTE_NODES = [
dict(id=1, local_gb=10, memory_mb=1024, vcpus=1,
@@ -24,9 +26,9 @@ COMPUTE_NODES = [
service=dict(host='host1', disabled=False),
hypervisor_hostname='node1uuid', host_ip='127.0.0.1',
hypervisor_version=1, hypervisor_type='ironic',
stats=dict(ironic_driver=
"nova.virt.ironic.driver.IronicDriver",
cpu_arch='i386'),
stats=jsonutils.dumps(dict(ironic_driver=
"nova.virt.ironic.driver.IronicDriver",
cpu_arch='i386')),
supported_instances='[["i386", "baremetal", "baremetal"]]',
free_disk_gb=10, free_ram_mb=1024),
dict(id=2, local_gb=20, memory_mb=2048, vcpus=1,
@@ -35,9 +37,9 @@ COMPUTE_NODES = [
service=dict(host='host2', disabled=True),
hypervisor_hostname='node2uuid', host_ip='127.0.0.1',
hypervisor_version=1, hypervisor_type='ironic',
stats=dict(ironic_driver=
"nova.virt.ironic.driver.IronicDriver",
cpu_arch='i386'),
stats=jsonutils.dumps(dict(ironic_driver=
"nova.virt.ironic.driver.IronicDriver",
cpu_arch='i386')),
supported_instances='[["i386", "baremetal", "baremetal"]]',
free_disk_gb=20, free_ram_mb=2048),
dict(id=3, local_gb=30, memory_mb=3072, vcpus=1,
@@ -46,9 +48,9 @@ COMPUTE_NODES = [
service=dict(host='host3', disabled=False),
hypervisor_hostname='node3uuid', host_ip='127.0.0.1',
hypervisor_version=1, hypervisor_type='ironic',
stats=dict(ironic_driver=
"nova.virt.ironic.driver.IronicDriver",
cpu_arch='i386'),
stats=jsonutils.dumps(dict(ironic_driver=
"nova.virt.ironic.driver.IronicDriver",
cpu_arch='i386')),
supported_instances='[["i386", "baremetal", "baremetal"]]',
free_disk_gb=30, free_ram_mb=3072),
dict(id=4, local_gb=40, memory_mb=4096, vcpus=1,
@@ -57,31 +59,17 @@ COMPUTE_NODES = [
service=dict(host='host4', disabled=False),
hypervisor_hostname='node4uuid', host_ip='127.0.0.1',
hypervisor_version=1, hypervisor_type='ironic',
stats=dict(ironic_driver=
"nova.virt.ironic.driver.IronicDriver",
cpu_arch='i386'),
stats=jsonutils.dumps(dict(ironic_driver=
"nova.virt.ironic.driver.IronicDriver",
cpu_arch='i386')),
supported_instances='[["i386", "baremetal", "baremetal"]]',
free_disk_gb=40, free_ram_mb=4096),
# Broken entry
dict(id=5, local_gb=50, memory_mb=5120, vcpus=1, service=None,
cpu_info='baremetal cpu',
stats=dict(ironic_driver=
"nova.virt.ironic.driver.IronicDriver",
cpu_arch='i386'),
stats=jsonutils.dumps(dict(ironic_driver=
"nova.virt.ironic.driver.IronicDriver",
cpu_arch='i386')),
supported_instances='[["i386", "baremetal", "baremetal"]]',
free_disk_gb=50, free_ram_mb=5120),
]
IRONIC_SERVICE_STATE = {
('host1', 'node1uuid'): {'compute': {'ironic_driver':
"nova.virt.ironic.driver.IronicDriver"}},
('host2', 'node2uuid'): {'compute': {'ironic_driver':
"nova.virt.ironic.driver.IronicDriver"}},
('host3', 'node3uuid'): {'compute': {'ironic_driver':
"nova.virt.ironic.driver.IronicDriver"}},
('host4', 'node4uuid'): {'compute': {'ironic_driver':
"nova.virt.ironic.driver.IronicDriver"}},
('host5', 'node5uuid'): {'compute': {'ironic_driver':
"nova.virt.ironic.driver.IronicDriver"}},
}

View File

@@ -22,6 +22,7 @@ from ironic.nova.tests.scheduler import ironic_fakes
from nova import db
from nova import exception
from nova.openstack.common import jsonutils
from nova.scheduler import filters
from nova import test
@@ -42,10 +43,6 @@ class IronicHostManagerTestCase(test.NoDBTestCase):
def setUp(self):
super(IronicHostManagerTestCase, self).setUp()
self.host_manager = ironic_host_manager.IronicHostManager()
self.fake_hosts = [ironic_host_manager.IronicNodeState(
'fake_host%s' % x, 'fake-node') for x in range(1, 5)]
self.fake_hosts += [ironic_host_manager.IronicNodeState(
'fake_multihost', 'fake-node%s' % x) for x in range(1, 5)]
def test_get_all_host_states(self):
# Ensure .service is set and we have the values we expect to.
@@ -55,12 +52,10 @@ class IronicHostManagerTestCase(test.NoDBTestCase):
db.compute_node_get_all(context).AndReturn(ironic_fakes.COMPUTE_NODES)
self.mox.ReplayAll()
self.host_manager.service_states = ironic_fakes.IRONIC_SERVICE_STATE
self.host_manager.get_all_host_states(context)
host_states_map = self.host_manager.host_state_map
self.assertEqual(len(host_states_map), 4)
# Check that .service is set properly
for i in range(4):
compute_node = ironic_fakes.COMPUTE_NODES[i]
host = compute_node['service']['host']
@@ -68,23 +63,12 @@ class IronicHostManagerTestCase(test.NoDBTestCase):
state_key = (host, node)
self.assertEqual(compute_node['service'],
host_states_map[state_key].service)
# check we have the values we think we should.
self.assertEqual(1024,
host_states_map[('host1', 'node1uuid')].free_ram_mb)
self.assertEqual(10240,
host_states_map[('host1', 'node1uuid')].free_disk_mb)
self.assertEqual(2048,
host_states_map[('host2', 'node2uuid')].free_ram_mb)
self.assertEqual(20480,
host_states_map[('host2', 'node2uuid')].free_disk_mb)
self.assertEqual(3072,
host_states_map[('host3', 'node3uuid')].free_ram_mb)
self.assertEqual(30720,
host_states_map[('host3', 'node3uuid')].free_disk_mb)
self.assertEqual(4096,
host_states_map[('host4', 'node4uuid')].free_ram_mb)
self.assertEqual(40960,
host_states_map[('host4', 'node4uuid')].free_disk_mb)
self.assertEqual(jsonutils.loads(compute_node['stats']),
host_states_map[state_key].stats)
self.assertEqual(compute_node['free_ram_mb'],
host_states_map[state_key].free_ram_mb)
self.assertEqual(compute_node['free_disk_gb'] * 1024,
host_states_map[state_key].free_disk_mb)
class IronicHostManagerChangedNodesTestCase(test.NoDBTestCase):
@@ -93,34 +77,16 @@ class IronicHostManagerChangedNodesTestCase(test.NoDBTestCase):
def setUp(self):
super(IronicHostManagerChangedNodesTestCase, self).setUp()
self.host_manager = ironic_host_manager.IronicHostManager()
self.fake_hosts = [
ironic_host_manager.IronicNodeState('host1', 'node1uuid'),
ironic_host_manager.IronicNodeState('host2', 'node2uuid'),
ironic_host_manager.IronicNodeState('host3', 'node3uuid'),
ironic_host_manager.IronicNodeState('host4', 'node4uuid')
]
self.compute_node = dict(id=1, local_gb=10, memory_mb=1024, vcpus=1,
vcpus_used=0, local_gb_used=0, memory_mb_used=0,
updated_at=None, cpu_info='baremetal cpu',
stats=dict(ironic_driver=
stats=jsonutils.dumps(dict(ironic_driver=
"nova.virt.ironic.driver.IronicDriver",
cpu_arch='i386'),
cpu_arch='i386')),
supported_instances=
'[["i386", "baremetal", "baremetal"]]',
free_disk_gb=10, free_ram_mb=1024)
def test_get_all_host_states(self):
context = 'fake_context'
self.mox.StubOutWithMock(db, 'compute_node_get_all')
db.compute_node_get_all(context).AndReturn(ironic_fakes.COMPUTE_NODES)
self.mox.ReplayAll()
self.host_manager.service_states = ironic_fakes.IRONIC_SERVICE_STATE
self.host_manager.get_all_host_states(context)
host_states_map = self.host_manager.host_state_map
self.assertEqual(4, len(host_states_map))
def test_get_all_host_states_after_delete_one(self):
context = 'fake_context'
@@ -133,7 +99,6 @@ class IronicHostManagerChangedNodesTestCase(test.NoDBTestCase):
db.compute_node_get_all(context).AndReturn(running_nodes)
self.mox.ReplayAll()
self.host_manager.service_states = ironic_fakes.IRONIC_SERVICE_STATE
self.host_manager.get_all_host_states(context)
self.host_manager.get_all_host_states(context)
host_states_map = self.host_manager.host_state_map
@@ -149,7 +114,6 @@ class IronicHostManagerChangedNodesTestCase(test.NoDBTestCase):
db.compute_node_get_all(context).AndReturn([])
self.mox.ReplayAll()
self.host_manager.service_states = ironic_fakes.IRONIC_SERVICE_STATE
self.host_manager.get_all_host_states(context)
self.host_manager.get_all_host_states(context)
host_states_map = self.host_manager.host_state_map