Drop dependency on libvirt-python
Most existing tests use our own VirshXMLClient (based on the upstream tempest SSH client); only the test_cpu_pinning.py tests use the libvirt client. On the one hand, the libvirt-python client proves more complicated to use on a TripleO deployment, and getting it to work on our infrared deployment is taking too much effort. On the other hand, all the other existing tests already use the SSH client. If a libvirt-python client is to be introduced later, it should live in clients.py so that it can be shared by all the other tests.

(PS: prior to this patch, the CPU pinning tests had not been run successfully on our infrared deployment.)

Change-Id: I2b65dca4fd26e9524b2b3ed7e182292eb5595599
parent d1535bb5b0
commit 27043b74c6
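To make the trade-off concrete, here is a rough sketch (not code from the plugin; the `stack` SSH user, hostnames and function names are made-up placeholders) of reading a guest's domain XML with libvirt-python versus with plain SSH and the virsh CLI, which is what VirshXMLClient does:

    # Illustrative sketch only: contrasts the two approaches discussed above.
    import subprocess


    def dumpxml_via_libvirt(hypervisor_ip, domain):
        """Old approach: needs the libvirt-python binding on the test runner."""
        import libvirt  # the extra system/python dependency this patch drops
        conn = libvirt.openReadOnly(
            'qemu+ssh://stack@{}/system'.format(hypervisor_ip))
        try:
            return conn.lookupByName(domain).XMLDesc()
        finally:
            conn.close()


    def dumpxml_via_ssh(hypervisor_ip, domain):
        """New approach: plain SSH plus the virsh CLI, like VirshXMLClient."""
        return subprocess.check_output(
            ['ssh', 'stack@{}'.format(hypervisor_ip),
             'sudo virsh dumpxml {}'.format(domain)]).decode()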
@@ -1,2 +0,0 @@
-python-libvirt [platform:dpkg]
-libvirt-python [platform:rpm]
tox.ini
@@ -27,17 +27,4 @@ show-source = True
 exclude = .git,.venv,.tox,dist,doc,*egg
 
 [hacking]
-local-check-factory = tempest.hacking.checks.factory
-
-[testenv:bindep]
-# Do not install any requirements. We want this to be fast and work even if
-# system dependencies are missing, since it's used to tell you what system
-# dependencies are missing! This also means that bindep must be installed
-# separately, outside of the requirements files, and develop mode disabled
-# explicitly to avoid unnecessarily installing the checked-out repo too (this
-# further relies on "tox.skipsdist = True" above).
-usedevelop = False
-deps =
-    bindep
-commands =
-    bindep test
+local-check-factory = tempest.hacking.checks.factory
@@ -13,15 +13,11 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-from contextlib import contextmanager
-
-import libvirt
 from oslo_log import log as logging
 from tempest.api.compute import base
 from tempest.common import waiters
 from tempest import config
 
 from whitebox_tempest_plugin.common import utils as whitebox_utils
 
 CONF = config.CONF
 LOG = logging.getLogger(__name__)
@@ -64,18 +60,3 @@ class BaseTest(base.BaseV2ComputeAdminTest):
                                        'ACTIVE')
 
         return self.servers_client.show_server(server_id)['server']
-
-    @contextmanager
-    def get_libvirt_conn(self, hostname):
-        """Get a read-only connection to a remote libvirt instance.
-
-        :param hostname: The hostname for the remote libvirt instance.
-        """
-        # Assume we're using QEMU-KVM and that network conectivity is available
-        libvirt_url = 'qemu+ssh://{}@{}/system'.format(
-            CONF.whitebox.target_ssh_user,
-            whitebox_utils.get_hypervisor_ip(self.servers_client, hostname))
-
-        conn = libvirt.openReadOnly(libvirt_url)
-        yield conn
-        conn.close()
@@ -24,6 +24,7 @@ For more information, refer to:
 - https://github.com/openstack/intel-nfv-ci-tests
 """
 
+import exceptions
 import testtools
 import xml.etree.ElementTree as ET
 
@@ -31,6 +32,8 @@ from tempest.common import utils
 from tempest import config
 
 from whitebox_tempest_plugin.api.compute import base
+from whitebox_tempest_plugin.common import utils as whitebox_utils
+from whitebox_tempest_plugin.services import clients
 
 CONF = config.CONF
 
@@ -40,11 +43,12 @@ class BaseTest(base.BaseTest):
     vcpus = 2
 
     def get_server_cpu_pinning(self, server):
-        instance_name = server['OS-EXT-SRV-ATTR:instance_name']
+        compute_node_address = whitebox_utils.get_hypervisor_ip(
+            self.servers_client, server['id'])
 
-        with self.get_libvirt_conn(server['OS-EXT-SRV-ATTR:host']) as conn:
-            dom0 = conn.lookupByName(instance_name)
-            root = ET.fromstring(dom0.XMLDesc())
+        virshxml = clients.VirshXMLClient(compute_node_address)
+        xml = virshxml.dumpxml(server['id'])
+        root = ET.fromstring(xml)
 
         vcpupin_nodes = root.findall('./cputune/vcpupin')
         cpu_pinnings = {int(x.get('vcpu')): int(x.get('cpuset'))
@@ -207,16 +211,21 @@ class CPUThreadPolicyTest(BaseTest):
                  core_1: [sibling_a, sibling_b, ...],
                  ...}
 
-        libvirt's getCapabilities() is called to get details about the host
+        `virsh capabilities` is called to get details about the host
         then a list of siblings per CPU is extracted and formatted to single
        level list.
         """
         siblings = {}
 
-        with self.get_libvirt_conn(host) as conn:
-            capxml = ET.fromstring(conn.getCapabilities())
-
-            cpu_cells = capxml.findall('./host/topology/cells/cell/cpus')
+        try:
+            host_address = CONF.whitebox.hypervisors[host]
+        except KeyError:
+            raise exceptions.MissingHypervisorException(server="",
+                                                        host=host)
+        virshxml = clients.VirshXMLClient(host_address)
+        capxml = virshxml.capabilities()
+        root = ET.fromstring(capxml)
+        cpu_cells = root.findall('./host/topology/cells/cell/cpus')
         for cell in cpu_cells:
             cpus = cell.findall('cpu')
             for cpu in cpus:
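The hunk above is truncated after the inner loop. As a rough sketch of what the sibling extraction amounts to (assuming the usual libvirt capabilities XML, where each <cpu> element carries a siblings attribute such as '0,4' or '0-1'; this is not the plugin's exact code):

    import xml.etree.ElementTree as ET


    def parse_siblings(capxml):
        """Map each host CPU id to the list of its thread siblings."""
        siblings = {}
        root = ET.fromstring(capxml)
        for cell in root.findall('./host/topology/cells/cell/cpus'):
            for cpu in cell.findall('cpu'):
                cpu_id = int(cpu.get('id'))
                spec = cpu.get('siblings', str(cpu_id))
                ids = set()
                # The attribute is a bitmap spec: comma-separated ids and/or ranges.
                for chunk in spec.split(','):
                    if '-' in chunk:
                        start, end = chunk.split('-')
                        ids.update(range(int(start), int(end) + 1))
                    else:
                        ids.add(int(chunk))
                siblings[cpu_id] = sorted(ids)
        return siblings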
@@ -93,3 +93,12 @@ class VirshXMLClient(SSHClient):
         with ctx:
             command = "virsh dumpxml {}".format(domain)
             return self.execute(self.host, command)
+
+    def capabilities(self):
+        if CONF.whitebox.containers:
+            ctx = self.container_command('nova_compute', user='root')
+        else:
+            ctx = self.sudo_command()
+        with ctx:
+            command = "virsh capabilities"
+            return self.execute(self.host, command)
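Putting the pieces together, a test built on the SSH client would drive it roughly like this; a sketch under assumed names (assert_server_is_pinned and expected_pcpus are not part of the plugin), with flavor and server setup omitted:

    import xml.etree.ElementTree as ET

    from whitebox_tempest_plugin.services import clients


    def assert_server_is_pinned(compute_node_address, server_id, expected_pcpus):
        """Check that every guest vCPU is pinned to one of the expected host CPUs."""
        virshxml = clients.VirshXMLClient(compute_node_address)
        root = ET.fromstring(virshxml.dumpxml(server_id))
        pinnings = {int(node.get('vcpu')): int(node.get('cpuset'))
                    for node in root.findall('./cputune/vcpupin')}
        assert pinnings, 'no <vcpupin> elements found for %s' % server_id
        assert set(pinnings.values()) <= set(expected_pcpus)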