Merge "Remove unused nova specific files"

This commit is contained in:
Jenkins 2013-10-23 13:12:09 +00:00 committed by Gerrit Code Review
commit bbefb22efd
7 changed files with 0 additions and 1413 deletions

@@ -1,485 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
#
# Copyright (c) 2012 NTT DOCOMO, INC
# Copyright (c) 2011 University of Southern California / ISI
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A driver for the bare-metal platform.
"""
from oslo.config import cfg
from nova.compute import power_state
from ironic.common import context as nova_context
from ironic.common import exception
from ironic.common import paths
from ironic.common import states
from ironic.openstack.common import excutils
from ironic.openstack.common import importutils
from ironic.openstack.common import log as logging
from ironic import db
from nova.virt import driver
from nova.virt import firewall
from nova.virt.libvirt import imagecache
opts = [
cfg.BoolOpt('inject_password',
default=True,
help='Whether baremetal compute injects password or not'),
cfg.StrOpt('injected_network_template',
default=paths.basedir_def('nova/virt/'
'baremetal/interfaces.template'),
help='Template file for injected network'),
cfg.StrOpt('vif_driver',
default='nova.virt.baremetal.vif_driver.BareMetalVIFDriver',
help='Baremetal VIF driver.'),
cfg.StrOpt('volume_driver',
default='nova.virt.baremetal.volume_driver.LibvirtVolumeDriver',
help='Baremetal volume driver.'),
cfg.ListOpt('instance_type_extra_specs',
default=[],
help='A list of additional capabilities, corresponding to '
'instance_type_extra_specs, for this compute host '
'to advertise. Valid entries are name:value pairs, '
'for example "key1:val1, key2:val2"'),
cfg.StrOpt('driver',
default='nova.virt.baremetal.pxe.PXE',
help='Baremetal driver back-end (pxe or tilera)'),
cfg.StrOpt('power_manager',
default='nova.virt.baremetal.ipmi.IPMI',
help='Baremetal power management method'),
cfg.StrOpt('tftp_root',
default='/tftpboot',
help='Baremetal compute node\'s tftp root path'),
]
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(opts)
CONF.import_opt('host', 'ironic.netconf')
DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
firewall.__name__,
firewall.NoopFirewallDriver.__name__)
def _get_baremetal_node_by_instance_uuid(instance_uuid):
ctx = nova_context.get_admin_context()
node = db.bm_node_get_by_instance_uuid(ctx, instance_uuid)
if node['service_host'] != CONF.host:
LOG.error(_("Request for baremetal node %s "
"sent to wrong service host") % instance_uuid)
raise exception.InstanceNotFound(instance_id=instance_uuid)
return node
def _update_state(context, node, instance, state):
"""Update the node state in baremetal DB
If instance is not supplied, reset the instance_uuid field for this node.
"""
values = {'task_state': state}
if not instance:
values['instance_uuid'] = None
values['instance_name'] = None
db.bm_node_update(context, node['id'], values)
def get_power_manager(**kwargs):
cls = importutils.import_class(CONF.power_manager)
return cls(**kwargs)
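# Usage sketch (hypothetical node values): the power manager class is
# resolved dynamically from CONF.power_manager, so the same call works
# for the IPMI default or a PDU back-end.
#
# >>> node = {'id': 1, 'pm_address': '10.0.100.5', 'pm_user': 'admin',
# ...         'pm_password': 'secret', 'terminal_port': 8000}
# >>> pm = get_power_manager(node=node, instance=instance)
# >>> pm.activate_node()  # returns a states.* value, e.g. states.ACTIVE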
class BareMetalDriver(driver.ComputeDriver):
"""BareMetal hypervisor driver."""
capabilities = {
"has_imagecache": True,
}
def __init__(self, virtapi, read_only=False):
super(BareMetalDriver, self).__init__(virtapi)
self.driver = importutils.import_object(
CONF.driver, virtapi)
self.vif_driver = importutils.import_object(
CONF.vif_driver)
self.firewall_driver = firewall.load_driver(
default=DEFAULT_FIREWALL_DRIVER)
self.volume_driver = importutils.import_object(
CONF.volume_driver, virtapi)
self.image_cache_manager = imagecache.ImageCacheManager()
extra_specs = {}
extra_specs["baremetal_driver"] = CONF.driver
for pair in CONF.instance_type_extra_specs:
keyval = pair.split(':', 1)
keyval[0] = keyval[0].strip()
keyval[1] = keyval[1].strip()
extra_specs[keyval[0]] = keyval[1]
if 'cpu_arch' not in extra_specs:
LOG.warning(
_('cpu_arch is not found in instance_type_extra_specs'))
extra_specs['cpu_arch'] = ''
self.extra_specs = extra_specs
self.supported_instances = [
(extra_specs['cpu_arch'], 'baremetal', 'baremetal'),
]
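# The loop above turns each "name:value" entry of
# CONF.instance_type_extra_specs into a dict item, e.g.
# (hypothetical entry):
#
# >>> pair = 'cpu_arch: x86_64 '
# >>> keyval = pair.split(':', 1)
# >>> {keyval[0].strip(): keyval[1].strip()}
# {'cpu_arch': 'x86_64'}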
@classmethod
def instance(cls):
if not hasattr(cls, '_instance'):
cls._instance = cls()
return cls._instance
def init_host(self, host):
return
def get_hypervisor_type(self):
return 'baremetal'
def get_hypervisor_version(self):
# TODO(deva): define the version properly elsewhere
return 1
def legacy_nwinfo(self):
return True
def list_instances(self):
context = nova_context.get_admin_context()
return [node['instance_name'] for node in
db.bm_node_get_associated(context, service_host=CONF.host)]
def _require_node(self, instance):
"""Get a node's uuid out of a manager instance dict.
The compute manager is meant to know the node uuid, so a missing uuid
is a significant issue - it may mean we've been passed someone else's
data.
"""
node_uuid = instance.get('node')
if not node_uuid:
raise exception.NovaException(_(
"Baremetal node id not supplied to driver for %r")
% instance['uuid'])
return node_uuid
def _attach_block_devices(self, instance, block_device_info):
block_device_mapping = driver.\
block_device_info_get_mapping(block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
mountpoint = vol['mount_device']
self.attach_volume(
connection_info, instance['name'], mountpoint)
def _detach_block_devices(self, instance, block_device_info):
block_device_mapping = driver.\
block_device_info_get_mapping(block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
mountpoint = vol['mount_device']
self.detach_volume(
connection_info, instance['name'], mountpoint)
def _start_firewall(self, instance, network_info):
self.firewall_driver.setup_basic_filtering(
instance, network_info)
self.firewall_driver.prepare_instance_filter(
instance, network_info)
self.firewall_driver.apply_instance_filter(
instance, network_info)
def _stop_firewall(self, instance, network_info):
self.firewall_driver.unfilter_instance(
instance, network_info)
def macs_for_instance(self, instance):
context = nova_context.get_admin_context()
node_uuid = self._require_node(instance)
node = db.bm_node_get_by_node_uuid(context, node_uuid)
ifaces = db.bm_interface_get_all_by_bm_node_id(context, node['id'])
return set(iface['address'] for iface in ifaces)
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
node_uuid = self._require_node(instance)
# NOTE(deva): this db method will raise an exception if the node is
# already in use. We call it here to ensure no one else
# allocates this node before we begin provisioning it.
node = db.bm_node_associate_and_update(context, node_uuid,
{'instance_uuid': instance['uuid'],
'instance_name': instance['hostname'],
'task_state': states.BUILDING})
try:
self._plug_vifs(instance, network_info, context=context)
self._attach_block_devices(instance, block_device_info)
self._start_firewall(instance, network_info)
self.driver.cache_images(
context, node, instance,
admin_password=admin_password,
image_meta=image_meta,
injected_files=injected_files,
network_info=network_info,
)
self.driver.activate_bootloader(context, node, instance)
self.power_on(instance, node)
self.driver.activate_node(context, node, instance)
_update_state(context, node, instance, states.ACTIVE)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_("Error deploying instance %(instance)s "
"on baremetal node %(node)s.") %
{'instance': instance['uuid'],
'node': node['uuid']})
# Do not set instance=None yet. This prevents another
# spawn() while we are cleaning up.
_update_state(context, node, instance, states.ERROR)
self.driver.deactivate_node(context, node, instance)
self.power_off(instance, node)
self.driver.deactivate_bootloader(context, node, instance)
self.driver.destroy_images(context, node, instance)
self._detach_block_devices(instance, block_device_info)
self._stop_firewall(instance, network_info)
self._unplug_vifs(instance, network_info)
_update_state(context, node, None, states.DELETED)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
ctx = nova_context.get_admin_context()
pm = get_power_manager(node=node, instance=instance)
state = pm.reboot_node()
if pm.state != states.ACTIVE:
raise exception.InstanceRebootFailure(_(
"Baremetal power manager failed to restart node "
"for instance %r") % instance['uuid'])
_update_state(ctx, node, instance, state)
def destroy(self, instance, network_info, block_device_info=None):
context = nova_context.get_admin_context()
try:
node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
except exception.InstanceNotFound:
LOG.warning(_("Destroy called on non-existing instance %s")
% instance['uuid'])
return
try:
self.driver.deactivate_node(context, node, instance)
self.power_off(instance, node)
self.driver.deactivate_bootloader(context, node, instance)
self.driver.destroy_images(context, node, instance)
self._detach_block_devices(instance, block_device_info)
self._stop_firewall(instance, network_info)
self._unplug_vifs(instance, network_info)
_update_state(context, node, None, states.DELETED)
except Exception as e:
with excutils.save_and_reraise_exception():
try:
LOG.error(_("Error from baremetal driver "
"during destroy: %s") % e)
_update_state(context, node, instance,
states.ERROR)
except Exception:
LOG.error(_("Error while recording destroy failure in "
"baremetal database: %s") % e)
def power_off(self, instance, node=None):
"""Power off the specified instance."""
if not node:
node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
pm = get_power_manager(node=node, instance=instance)
pm.deactivate_node()
if pm.state != states.DELETED:
raise exception.InstancePowerOffFailure(_(
"Baremetal power manager failed to stop node "
"for instance %r") % instance['uuid'])
pm.stop_console()
def power_on(self, instance, node=None):
"""Power on the specified instance."""
if not node:
node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
pm = get_power_manager(node=node, instance=instance)
pm.activate_node()
if pm.state != states.ACTIVE:
raise exception.InstancePowerOnFailure(_(
"Baremetal power manager failed to start node "
"for instance %r") % instance['uuid'])
pm.start_console()
def get_volume_connector(self, instance):
return self.volume_driver.get_volume_connector(instance)
def attach_volume(self, connection_info, instance, mountpoint):
return self.volume_driver.attach_volume(connection_info,
instance, mountpoint)
def detach_volume(self, connection_info, instance_name, mountpoint):
return self.volume_driver.detach_volume(connection_info,
instance_name, mountpoint)
def get_info(self, instance):
# NOTE(deva): compute/manager.py expects to get NotFound exception
# so we convert from InstanceNotFound
inst_uuid = instance.get('uuid')
node = _get_baremetal_node_by_instance_uuid(inst_uuid)
pm = get_power_manager(node=node, instance=instance)
ps = power_state.SHUTDOWN
if pm.is_power_on():
ps = power_state.RUNNING
return {'state': ps,
'max_mem': node['memory_mb'],
'mem': node['memory_mb'],
'num_cpu': node['cpus'],
'cpu_time': 0}
def refresh_security_group_rules(self, security_group_id):
self.firewall_driver.refresh_security_group_rules(security_group_id)
return True
def refresh_security_group_members(self, security_group_id):
self.firewall_driver.refresh_security_group_members(security_group_id)
return True
def refresh_provider_fw_rules(self):
self.firewall_driver.refresh_provider_fw_rules()
def _node_resource(self, node):
vcpus_used = 0
memory_mb_used = 0
local_gb_used = 0
vcpus = node['cpus']
memory_mb = node['memory_mb']
local_gb = node['local_gb']
if node['instance_uuid']:
vcpus_used = node['cpus']
memory_mb_used = node['memory_mb']
local_gb_used = node['local_gb']
dic = {'vcpus': vcpus,
'memory_mb': memory_mb,
'local_gb': local_gb,
'vcpus_used': vcpus_used,
'memory_mb_used': memory_mb_used,
'local_gb_used': local_gb_used,
'hypervisor_type': self.get_hypervisor_type(),
'hypervisor_version': self.get_hypervisor_version(),
'hypervisor_hostname': str(node['uuid']),
'cpu_info': 'baremetal cpu',
}
return dic
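# Example of the accounting above (hypothetical node): a node with an
# instance_uuid reports all of its resources as used; a free node
# reports zero usage.
#
# >>> free = {'uuid': 'n1', 'cpus': 8, 'memory_mb': 16384,
# ...         'local_gb': 100, 'instance_uuid': None}
# >>> r = driver._node_resource(free)
# >>> (r['vcpus'], r['vcpus_used'], r['memory_mb_used'])
# (8, 0, 0)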
def refresh_instance_security_rules(self, instance):
self.firewall_driver.refresh_instance_security_rules(instance)
def get_available_resource(self, nodename):
context = nova_context.get_admin_context()
resource = {}
try:
node = db.bm_node_get_by_node_uuid(context, nodename)
resource = self._node_resource(node)
except exception.NodeNotFoundByUUID:
pass
return resource
def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
self.firewall_driver.setup_basic_filtering(instance_ref, network_info)
self.firewall_driver.prepare_instance_filter(instance_ref,
network_info)
def unfilter_instance(self, instance_ref, network_info):
self.firewall_driver.unfilter_instance(instance_ref,
network_info=network_info)
def get_host_stats(self, refresh=False):
caps = []
context = nova_context.get_admin_context()
nodes = db.bm_node_get_all(context,
service_host=CONF.host)
for node in nodes:
res = self._node_resource(node)
nodename = str(node['uuid'])
data = {}
data['vcpus'] = res['vcpus']
data['vcpus_used'] = res['vcpus_used']
data['cpu_info'] = res['cpu_info']
data['disk_total'] = res['local_gb']
data['disk_used'] = res['local_gb_used']
data['disk_available'] = res['local_gb'] - res['local_gb_used']
data['host_memory_total'] = res['memory_mb']
data['host_memory_free'] = res['memory_mb'] - res['memory_mb_used']
data['hypervisor_type'] = res['hypervisor_type']
data['hypervisor_version'] = res['hypervisor_version']
data['hypervisor_hostname'] = nodename
data['supported_instances'] = self.supported_instances
data.update(self.extra_specs)
data['host'] = CONF.host
data['node'] = nodename
# TODO(NTTdocomo): put node's extra specs here
caps.append(data)
return caps
def plug_vifs(self, instance, network_info):
"""Plugin VIFs into networks."""
self._plug_vifs(instance, network_info)
def _plug_vifs(self, instance, network_info, context=None):
if not context:
context = nova_context.get_admin_context()
node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
if node:
pifs = db.bm_interface_get_all_by_bm_node_id(context, node['id'])
for pif in pifs:
if pif['vif_uuid']:
db.bm_interface_set_vif_uuid(context, pif['id'], None)
for (network, mapping) in network_info:
self.vif_driver.plug(instance, (network, mapping))
def _unplug_vifs(self, instance, network_info):
for (network, mapping) in network_info:
self.vif_driver.unplug(instance, (network, mapping))
def manage_image_cache(self, context, all_instances):
"""Manage the local cache of images."""
self.image_cache_manager.verify_base_images(context, all_instances)
def get_console_output(self, instance):
node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
return self.driver.get_console_output(node, instance)
def get_available_nodes(self):
context = nova_context.get_admin_context()
return [str(n['uuid']) for n in
db.bm_node_get_all(context, service_host=CONF.host)]

@@ -1,18 +0,0 @@
# Injected by Nova on instance boot
#
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
#for $ifc in $interfaces
auto ${ifc.name}
iface ${ifc.name} inet dhcp
#if $use_ipv6
iface ${ifc.name} inet6 dhcp
#end if
#end for

@@ -1,27 +0,0 @@
# Injected by Nova on instance boot
#
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
#for $ifc in $interfaces
auto ${ifc.name}
iface ${ifc.name} inet static
address ${ifc.address}
netmask ${ifc.netmask}
gateway ${ifc.gateway}
#if $ifc.dns
dns-nameservers ${ifc.dns}
#end if
#if $use_ipv6
iface ${ifc.name} inet6 static
address ${ifc.address_v6}
netmask ${ifc.netmask_v6}
gateway ${ifc.gateway_v6}
#end if
#end for
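## Example (hypothetical values; '##' lines are Cheetah-style comments
## and are not rendered): with name=eth0, address=10.0.0.2,
## netmask=255.255.255.0, gateway=10.0.0.1, no dns entry, and use_ipv6
## off, the loop above renders:
##
## auto eth0
## iface eth0 inet static
## address 10.0.0.2
## netmask 255.255.255.0
## gateway 10.0.0.1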

@@ -1,376 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011-2013 University of Southern California / ISI
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Class for Tilera bare-metal nodes.
"""
import base64
import os
import jinja2
from oslo.config import cfg
from nova.compute import instance_types
from ironic.common import exception
from ironic.common import states
from ironic.common import utils
from ironic import db
from ironic.manager import base
from ironic.openstack.common.db import exception as db_exc
from ironic.openstack.common import fileutils
from ironic.openstack.common import log as logging
tilera_opts = [
cfg.StrOpt('net_config_template',
default='$pybasedir/ironic/net-dhcp.ubuntu.template',
help='Template file for injected network config'),
]
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(tilera_opts)
CONF.import_opt('use_ipv6', 'ironic.netconf')
def build_network_config(network_info):
# Normalize a single (network, mapping) tuple into a list. An
# assert would be stripped under "python -O", so check explicitly.
if not isinstance(network_info, list):
network_info = [network_info]
interfaces = []
for ifc_num, (network, mapping) in enumerate(network_info):
address_v6 = None
gateway_v6 = None
netmask_v6 = None
if CONF.use_ipv6:
address_v6 = mapping['ip6s'][0]['ip']
netmask_v6 = mapping['ip6s'][0]['netmask']
gateway_v6 = mapping['gateway_v6']
interface = {
'name': 'eth%d' % ifc_num,
'address': mapping['ips'][0]['ip'],
'gateway': mapping['gateway'],
'netmask': mapping['ips'][0]['netmask'],
'dns': ' '.join(mapping['dns']),
'address_v6': address_v6,
'gateway_v6': gateway_v6,
'netmask_v6': netmask_v6,
}
interfaces.append(interface)
tmpl_path, tmpl_file = os.path.split(CONF.net_config_template)
env = jinja2.Environment(loader=jinja2.FileSystemLoader(tmpl_path))
template = env.get_template(tmpl_file)
return template.render({'interfaces': interfaces,
'use_ipv6': CONF.use_ipv6})
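# Sketch of the expected input (hypothetical legacy nwinfo pair): each
# element is a (network, mapping) tuple, and only the mapping side is
# read above.
#
# >>> mapping = {'ips': [{'ip': '10.0.0.2', 'netmask': '255.255.255.0'}],
# ...            'gateway': '10.0.0.1', 'dns': ['8.8.8.8']}
# >>> config = build_network_config([(None, mapping)])  # use_ipv6 off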
def get_image_dir_path(instance):
"""Generate the dir for an instances disk."""
return os.path.join(CONF.instances_path, instance['name'])
def get_image_file_path(instance):
"""Generate the full path for an instances disk."""
return os.path.join(CONF.instances_path, instance['name'], 'disk')
def get_tilera_nfs_path(node_id):
"""Generate the path for an instances Tilera nfs."""
tilera_nfs_dir = "fs_" + str(node_id)
return os.path.join(CONF.tftp_root, tilera_nfs_dir)
def get_partition_sizes(instance):
instance_type = instance_types.extract_instance_type(instance)
root_mb = instance_type['root_gb'] * 1024
swap_mb = instance_type['swap']
if swap_mb < 1:
swap_mb = 1
return (root_mb, swap_mb)
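# Example (hypothetical instance type with root_gb=10, swap=0): the
# root partition is converted to MB and swap is clamped to the 1 MB
# minimum.
#
# >>> get_partition_sizes(instance)
# (10240, 1)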
def get_tftp_image_info(instance):
"""Generate the paths for tftp files for this instance.
Raises NovaException if the instance does not contain a kernel_id.
"""
image_info = {
'kernel': [None, None],
}
try:
image_info['kernel'][0] = str(instance['kernel_id'])
except KeyError:
pass
missing_labels = []
for label in image_info.keys():
(uuid, path) = image_info[label]
if not uuid:
missing_labels.append(label)
else:
image_info[label][1] = os.path.join(CONF.tftp_root,
instance['uuid'], label)
if missing_labels:
raise exception.NovaException(_(
"Can not activate Tilera bootloader. "
"The following boot parameters "
"were not passed to baremetal driver: %s") % missing_labels)
return image_info
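# Example (hypothetical ids, default tftp_root): an instance with a
# kernel_id maps the label to a [uuid, tftp path] pair; without one,
# NovaException is raised.
#
# >>> get_tftp_image_info({'uuid': 'u1', 'kernel_id': 'k1'})
# {'kernel': ['k1', '/tftpboot/u1/kernel']}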
class Tilera(base.NodeDriver):
"""Tilera bare metal driver."""
def __init__(self, virtapi):
super(Tilera, self).__init__(virtapi)
def _collect_mac_addresses(self, context, node):
macs = set()
for nic in db.bm_interface_get_all_by_bm_node_id(context, node['id']):
if nic['address']:
macs.add(nic['address'])
return sorted(macs)
def _cache_tftp_images(self, context, instance, image_info):
"""Fetch the necessary kernels and ramdisks for the instance."""
fileutils.ensure_tree(
os.path.join(CONF.tftp_root, instance['uuid']))
LOG.debug(_("Fetching kernel and ramdisk for instance %s") %
instance['name'])
for label in image_info.keys():
(uuid, path) = image_info[label]
utils.cache_image(
context=context,
target=path,
image_id=uuid,
user_id=instance['user_id'],
project_id=instance['project_id'],
)
def _cache_image(self, context, instance, image_meta):
"""Fetch the instance's image from Glance
This method pulls the relevant AMI and associated kernel and ramdisk,
and the deploy kernel and ramdisk from Glance, and writes them
to the appropriate places on local disk.
Both sets of kernel and ramdisk are needed for Tilera booting, so these
are stored under CONF.tftp_root.
At present, the AMI is cached and certain files are injected.
Debian/Ubuntu-specific assumptions are made regarding the injected
files. In a future revision, this functionality will be replaced by a
more scalable and os-agnostic approach: the deployment ramdisk will
fetch from Glance directly, and write its own last-mile configuration.
"""
fileutils.ensure_tree(get_image_dir_path(instance))
image_path = get_image_file_path(instance)
LOG.debug(_("Fetching image %(ami)s for instance %(name)s") %
{'ami': image_meta['id'], 'name': instance['name']})
utils.cache_image(context=context,
target=image_path,
image_id=image_meta['id'],
user_id=instance['user_id'],
project_id=instance['project_id']
)
return [image_meta['id'], image_path]
def _inject_into_image(self, context, node, instance, network_info,
injected_files=None, admin_password=None):
"""Inject last-mile configuration into instances image
Much of this method is a hack around DHCP and cloud-init
not working together with baremetal provisioning yet.
"""
partition = None
if not instance['kernel_id']:
partition = "1"
ssh_key = None
if 'key_data' in instance and instance['key_data']:
ssh_key = str(instance['key_data'])
if injected_files is None:
injected_files = []
else:
injected_files = list(injected_files)
net_config = build_network_config(network_info)
if instance['hostname']:
injected_files.append(('/etc/hostname', instance['hostname']))
LOG.debug(_("Injecting files into image for instance %(name)s") %
{'name': instance['name']})
utils.inject_into_image(
image=get_image_file_path(instance),
key=ssh_key,
net=net_config,
metadata=instance['metadata'],
admin_password=admin_password,
files=injected_files,
partition=partition,
)
def cache_images(self, context, node, instance,
admin_password, image_meta, injected_files, network_info):
"""Prepare all the images for this instance."""
tftp_image_info = get_tftp_image_info(instance)
self._cache_tftp_images(context, instance, tftp_image_info)
self._cache_image(context, instance, image_meta)
self._inject_into_image(context, node, instance, network_info,
injected_files, admin_password)
def destroy_images(self, context, node, instance):
"""Delete instance's image file."""
utils.unlink_without_raise(get_image_file_path(instance))
utils.rmtree_without_raise(get_image_dir_path(instance))
def activate_bootloader(self, context, node, instance):
"""Configure Tilera boot loader for an instance
Kernel and ramdisk images are downloaded by cache_tftp_images,
and stored in /tftpboot/{uuid}/
This method writes the instances config file, and then creates
symlinks for each MAC address in the instance.
By default, the complete layout looks like this:
/tftpboot/
./{uuid}/
kernel
./fs_node_id/
"""
(root_mb, swap_mb) = get_partition_sizes(instance)
tilera_nfs_path = get_tilera_nfs_path(node['id'])
image_file_path = get_image_file_path(instance)
deployment_key = utils.random_alnum(32)
db.bm_node_update(context, node['id'],
{'deploy_key': deployment_key,
'image_path': image_file_path,
'pxe_config_path': tilera_nfs_path,
'root_mb': root_mb,
'swap_mb': swap_mb})
if os.path.exists(image_file_path) and \
os.path.exists(tilera_nfs_path):
utils.execute('mount', '-o', 'loop', image_file_path,
tilera_nfs_path, run_as_root=True)
def deactivate_bootloader(self, context, node, instance):
"""Delete Tilera bootloader images and config."""
try:
db.bm_node_update(context, node['id'],
{'deploy_key': None,
'image_path': None,
'pxe_config_path': None,
'root_mb': 0,
'swap_mb': 0})
except exception.NodeNotFound:
pass
tilera_nfs_path = get_tilera_nfs_path(node['id'])
if os.path.ismount(tilera_nfs_path):
utils.execute('rpc.mountd', run_as_root=True)
utils.execute('umount', '-f', tilera_nfs_path, run_as_root=True)
try:
image_info = get_tftp_image_info(instance)
except exception.NovaException:
pass
else:
for label in image_info.keys():
(uuid, path) = image_info[label]
utils.unlink_without_raise(path)
try:
self._collect_mac_addresses(context, node)
except db_exc.DBError:
pass
if os.path.exists(os.path.join(CONF.tftp_root,
instance['uuid'])):
utils.rmtree_without_raise(
os.path.join(CONF.tftp_root, instance['uuid']))
def _iptables_set(self, node_ip, user_data):
"""Sets security setting (iptables:port) if needed.
iptables -A INPUT -p tcp ! -s $IP --dport $PORT -j DROP
/tftpboot/iptables_rule script sets iptables rule on the given node.
"""
rule_path = CONF.tftp_root + "/iptables_rule"
if user_data is not None:
open_ip = base64.b64decode(user_data)
utils.execute(rule_path, node_ip, open_ip)
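# Example (hypothetical addresses, default tftp_root): if user_data is
# the base64 encoding of '10.0.0.2' and the node's PM address is
# 10.0.100.5, the call above runs roughly:
#
#   /tftpboot/iptables_rule 10.0.100.5 10.0.0.2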
def activate_node(self, context, node, instance):
"""Wait for Tilera deployment to complete."""
deploy_info = {'error': '', 'started': False}
try:
row = db.bm_node_get(context, node['id'])
if instance['uuid'] != row.get('instance_uuid'):
deploy_info['error'] = _("Node associated with another instance"
" while waiting for deploy of %s")
status = row.get('task_state')
if (status == states.DEPLOYING and
deploy_info['started'] is False):
LOG.info(_('Tilera deploy started for instance %s')
% instance['uuid'])
deploy_info['started'] = True
elif status in (states.DEPLOYDONE,
states.BUILDING,
states.ACTIVE):
LOG.info(_("Tilera deploy completed for instance %s")
% instance['uuid'])
node_ip = node['pm_address']
user_data = instance['user_data']
try:
self._iptables_set(node_ip, user_data)
except Exception:
self.deactivate_bootloader(context, node, instance)
raise exception.NovaException(_("Node is in an "
"unknown error state."))
elif status == states.DEPLOYFAIL:
deploy_info['error'] = _("Tilera deploy failed for instance %s")
except exception.NodeNotFound:
deploy_info['error'] = _("Baremetal node deleted while waiting "
"for deployment of instance %s")
if deploy_info['error']:
raise exception.InstanceDeployFailure(
deploy_info['error'] % instance['uuid'])
def deactivate_node(self, context, node, instance):
pass

@@ -1,170 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
# Copyright (c) 2011-2013 University of Southern California / ISI
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Baremetal PDU power manager.
"""
import time
from oslo.config import cfg
from ironic.common import exception
from ironic.common import states
from ironic.common import utils
from ironic.manager import base
from ironic.openstack.common import log as logging
opts = [
cfg.StrOpt('tile_pdu_ip',
default='10.0.100.1',
help='IP address of the Tilera PDU'),
cfg.StrOpt('tile_pdu_mgr',
default='/tftpboot/pdu_mgr',
help='Management script for the Tilera PDU'),
cfg.IntOpt('tile_pdu_off',
default=2,
help='Power status value when the Tilera PDU is OFF'),
cfg.IntOpt('tile_pdu_on',
default=1,
help='Power status value when the Tilera PDU is ON'),
cfg.IntOpt('tile_pdu_status',
default=9,
help='Mode value used to query the Tilera PDU power status'),
cfg.IntOpt('tile_power_wait',
default=9,
help='Wait time in seconds before checking the result '
'of a Tilera power operation'),
]
CONF = cfg.CONF
CONF.register_opts(opts)
LOG = logging.getLogger(__name__)
class Pdu(base.PowerManager):
"""PDU Power Driver for Baremetal Nova Compute
This PowerManager class provides a mechanism for controlling the power
state of physical hardware via PDU calls.
"""
def __init__(self, node, **kwargs):
self.state = None
self.retries = None
self.node_id = node['id']
self.address = node['pm_address']
self.user = node['pm_user']
self.password = node['pm_password']
self.port = node['terminal_port']
if self.node_id is None:
raise exception.InvalidParameterValue(_("Node id not supplied "
"to PDU"))
if self.address is None:
raise exception.InvalidParameterValue(_("Address not supplied "
"to PDU"))
if self.user is None:
raise exception.InvalidParameterValue(_("User not supplied "
"to PDU"))
if self.password is None:
raise exception.InvalidParameterValue(_("Password not supplied "
"to PDU"))
def _exec_pdutool(self, mode):
"""Changes power state of the given node.
According to the mode (1-ON, 2-OFF, 3-REBOOT), power state can be
changed. /tftpboot/pdu_mgr script handles power management of
PDU (Power Distribution Unit).
"""
if mode == CONF.tile_pdu_status:
try:
utils.execute('ping', '-c1', self.address,
check_exit_code=True)
return CONF.tile_pdu_on
except exception.ProcessExecutionError:
return CONF.tile_pdu_off
else:
try:
utils.execute(CONF.tile_pdu_mgr,
CONF.tile_pdu_ip, mode)
time.sleep(CONF.tile_power_wait)
return mode
except exception.ProcessExecutionError:
LOG.exception(_("PDU failed"))
def _is_power(self, state):
out_err = self._exec_pdutool(CONF.tile_pdu_status)
return out_err == state
def _power_on(self):
"""Turn the power to this node ON."""
try:
self._exec_pdutool(CONF.tile_pdu_on)
if self._is_power(CONF.tile_pdu_on):
self.state = states.ACTIVE
else:
self.state = states.ERROR
except Exception:
self.state = states.ERROR
LOG.exception(_("PDU power on failed"))
def _power_off(self):
"""Turn the power to this node OFF."""
try:
self._exec_pdutool(CONF.tile_pdu_off)
if self._is_power(CONF.tile_pdu_off):
self.state = states.DELETED
else:
self.state = states.ERROR
except Exception:
self.state = states.ERROR
LOG.exception(_("PDU power off failed"))
def activate_node(self):
"""Turns the power to node ON."""
if (self._is_power(CONF.tile_pdu_on)
and self.state == states.ACTIVE):
LOG.warning(_("Activate node called, but node %s "
"is already active") % self.address)
self._power_on()
return self.state
def reboot_node(self):
"""Cycles the power to a node."""
self._power_off()
self._power_on()
return self.state
def deactivate_node(self):
"""Turns the power to node OFF, regardless of current state."""
self._power_off()
return self.state
def is_power_on(self):
return self._is_power(CONF.tile_pdu_on)
def start_console(self):
pass
def stop_console(self):
pass

@@ -1,75 +0,0 @@
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from ironic.common import context as nova_context
from ironic.common import exception
from ironic.openstack.common import log as logging
from ironic import db
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class BareMetalVIFDriver(object):
def _after_plug(self, instance, network, mapping, pif):
pass
def _after_unplug(self, instance, network, mapping, pif):
pass
def plug(self, instance, vif):
LOG.debug(_("plug: instance_uuid=%(uuid)s vif=%(vif)s")
% {'uuid': instance['uuid'], 'vif': vif})
network, mapping = vif
vif_uuid = mapping['vif_uuid']
ctx = nova_context.get_admin_context()
node = db.bm_node_get_by_instance_uuid(ctx, instance['uuid'])
# TODO(deva): optimize this database query
# this is just searching for a free physical interface
pifs = db.bm_interface_get_all_by_bm_node_id(ctx, node['id'])
for pif in pifs:
if not pif['vif_uuid']:
db.bm_interface_set_vif_uuid(ctx, pif['id'], vif_uuid)
LOG.debug(_("pif:%(id)s is plugged (vif_uuid=%(vif_uuid)s)")
% {'id': pif['id'], 'vif_uuid': vif_uuid})
self._after_plug(instance, network, mapping, pif)
return
# NOTE(deva): should this really be raising an exception
# when there are no physical interfaces left?
raise exception.NovaException(_(
"Baremetal node: %(id)s has no available physical interface"
" for virtual interface %(vif_uuid)s")
% {'id': node['id'], 'vif_uuid': vif_uuid})
def unplug(self, instance, vif):
LOG.debug(_("unplug: instance_uuid=%(uuid)s vif=%(vif)s"),
{'uuid': instance['uuid'], 'vif': vif})
network, mapping = vif
vif_uuid = mapping['vif_uuid']
ctx = nova_context.get_admin_context()
try:
pif = db.bm_interface_get_by_vif_uuid(ctx, vif_uuid)
db.bm_interface_set_vif_uuid(ctx, pif['id'], None)
LOG.debug(_("pif:%(id)s is unplugged (vif_uuid=%(vif_uuid)s)")
% {'id': pif['id'], 'vif_uuid': vif_uuid})
self._after_unplug(instance, network, mapping, pif)
except exception.NovaException:
LOG.warn(_("no pif for vif_uuid=%s") % vif_uuid)

@@ -1,262 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from oslo.config import cfg
from ironic.common import context as nova_context
from ironic.common import exception
from ironic.common import utils
from ironic.openstack.common import importutils
from ironic.openstack.common import log as logging
from ironic import db
from nova.virt.libvirt import utils as libvirt_utils
opts = [
cfg.BoolOpt('use_unsafe_iscsi',
default=False,
help='Do not set this outside dev/test environments. '
'If a node does not have a fixed PXE IP address, '
'volumes are exported with a globally open ACL'),
cfg.StrOpt('iscsi_iqn_prefix',
default='iqn.2010-10.org.openstack.baremetal',
help='iSCSI IQN prefix used in baremetal volume connections.'),
]
CONF = cfg.CONF
CONF.register_opts(opts)
CONF.import_opt('host', 'ironic.netconf')
CONF.import_opt('use_ipv6', 'ironic.netconf')
CONF.import_opt('libvirt_volume_drivers', 'nova.virt.libvirt.driver')
LOG = logging.getLogger(__name__)
def _get_baremetal_node_by_instance_uuid(instance_uuid):
context = nova_context.get_admin_context()
return db.bm_node_get_by_instance_uuid(context, instance_uuid)
def _create_iscsi_export_tgtadm(path, tid, iqn):
utils.execute('tgtadm', '--lld', 'iscsi',
'--mode', 'target',
'--op', 'new',
'--tid', tid,
'--targetname', iqn,
run_as_root=True)
utils.execute('tgtadm', '--lld', 'iscsi',
'--mode', 'logicalunit',
'--op', 'new',
'--tid', tid,
'--lun', '1',
'--backing-store', path,
run_as_root=True)
def _allow_iscsi_tgtadm(tid, address):
utils.execute('tgtadm', '--lld', 'iscsi',
'--mode', 'target',
'--op', 'bind',
'--tid', tid,
'--initiator-address', address,
run_as_root=True)
def _delete_iscsi_export_tgtadm(tid):
try:
utils.execute('tgtadm', '--lld', 'iscsi',
'--mode', 'logicalunit',
'--op', 'delete',
'--tid', tid,
'--lun', '1',
run_as_root=True)
except exception.ProcessExecutionError:
pass
try:
utils.execute('tgtadm', '--lld', 'iscsi',
'--mode', 'target',
'--op', 'delete',
'--tid', tid,
run_as_root=True)
except exception.ProcessExecutionError:
pass
# Check that the tid was deleted, i.e. that it no longer exists.
# If the tid does not exist, tgtadm exits with code 22.
# utils.execute() can check the exit code if the check_exit_code
# parameter is passed, but it never reports an error when the exit
# code is 0, regardless of what check_exit_code contains. So we have
# to catch ProcessExecutionError and test whether its exit_code is 22.
try:
utils.execute('tgtadm', '--lld', 'iscsi',
'--mode', 'target',
'--op', 'show',
'--tid', tid,
run_as_root=True)
except exception.ProcessExecutionError as e:
if e.exit_code == 22:
# OK, the tid is deleted
return
raise
raise exception.NovaException(_(
'baremetal driver was unable to delete tid %s') % tid)
def _show_tgtadm():
out, _err = utils.execute('tgtadm', '--lld', 'iscsi',
'--mode', 'target',
'--op', 'show',
run_as_root=True)
return out
def _list_backingstore_path():
out = _show_tgtadm()
paths = []
for line in out.split('\n'):
m = re.search(r'Backing store path: (.*)$', line)
if m:
if '/' in m.group(1):
paths.append(m.group(1))
return paths
def _get_next_tid():
out = _show_tgtadm()
last_tid = 0
for line in out.split('\n'):
m = re.search(r'^Target (\d+):', line)
if m:
tid = int(m.group(1))
if last_tid < tid:
last_tid = tid
return last_tid + 1
def _find_tid(iqn):
out = _show_tgtadm()
pattern = r'^Target (\d+): *' + re.escape(iqn)
for line in out.split('\n'):
m = re.search(pattern, line)
if m:
return int(m.group(1))
return None
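# Example (hypothetical tgtadm output): only the "Target <n>:" lines
# matter to the two parsers above.
#
# >>> out = 'Target 1: iqn...foo\nTarget 3: iqn...bar\n'
# >>> # _get_next_tid() -> 4;  _find_tid('iqn...bar') -> 3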
def _get_iqn(instance_name, mountpoint):
mp = mountpoint.replace('/', '-').strip('-')
iqn = '%s:%s-%s' % (CONF.iscsi_iqn_prefix,
instance_name,
mp)
return iqn
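# Example (default iscsi_iqn_prefix):
#
# >>> _get_iqn('instance-0001', '/dev/vdb')
# 'iqn.2010-10.org.openstack.baremetal:instance-0001-dev-vdb'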
class VolumeDriver(object):
def __init__(self, virtapi):
super(VolumeDriver, self).__init__()
self.virtapi = virtapi
self._initiator = None
def get_volume_connector(self, instance):
if not self._initiator:
self._initiator = libvirt_utils.get_iscsi_initiator()
if not self._initiator:
LOG.warn(_('Could not determine iscsi initiator name '
'for instance %s') % instance)
return {
'ip': CONF.my_ip,
'initiator': self._initiator,
'host': CONF.host,
}
def attach_volume(self, connection_info, instance, mountpoint):
raise NotImplementedError()
def detach_volume(self, connection_info, instance, mountpoint):
raise NotImplementedError()
class LibvirtVolumeDriver(VolumeDriver):
"""The VolumeDriver deligates to nova.virt.libvirt.volume."""
def __init__(self, virtapi):
super(LibvirtVolumeDriver, self).__init__(virtapi)
self.volume_drivers = {}
for driver_str in CONF.libvirt_volume_drivers:
driver_type, _sep, driver = driver_str.partition('=')
driver_class = importutils.import_class(driver)
self.volume_drivers[driver_type] = driver_class(self)
def _volume_driver_method(self, method_name, connection_info,
*args, **kwargs):
driver_type = connection_info.get('driver_volume_type')
if driver_type not in self.volume_drivers:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
driver = self.volume_drivers[driver_type]
method = getattr(driver, method_name)
return method(connection_info, *args, **kwargs)
def attach_volume(self, connection_info, instance, mountpoint):
node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
ctx = nova_context.get_admin_context()
pxe_ip = db.bm_pxe_ip_get_by_bm_node_id(ctx, node['id'])
if not pxe_ip:
if not CONF.use_unsafe_iscsi:
raise exception.NovaException(_(
'No fixed PXE IP is associated to %s') % instance['uuid'])
mount_device = mountpoint.rpartition("/")[2]
self._volume_driver_method('connect_volume',
connection_info,
mount_device)
device_path = connection_info['data']['device_path']
iqn = _get_iqn(instance['name'], mountpoint)
tid = _get_next_tid()
_create_iscsi_export_tgtadm(device_path, tid, iqn)
if pxe_ip:
_allow_iscsi_tgtadm(tid, pxe_ip['address'])
else:
# NOTE(NTTdocomo): Since nova-compute does not know the
# instance's initiator ip, it allows any initiators
# to connect to the volume. This means other bare-metal
# instances that are not attached to the volume can connect
# to the volume. Do not set CONF.use_unsafe_iscsi
# out of dev/test environments.
# TODO(NTTdocomo): support CHAP
_allow_iscsi_tgtadm(tid, 'ALL')
def detach_volume(self, connection_info, instance, mountpoint):
mount_device = mountpoint.rpartition("/")[2]
try:
iqn = _get_iqn(instance['name'], mountpoint)
tid = _find_tid(iqn)
if tid is not None:
_delete_iscsi_export_tgtadm(tid)
else:
LOG.warn(_('detach volume could not find tid for %s') % iqn)
finally:
self._volume_driver_method('disconnect_volume',
connection_info,
mount_device)
def get_all_block_devices(self):
"""Return all block devices in use on this node."""
return _list_backingstore_path()