Delete hwm (Hardware Management) related code

Daisy once used hwm to discover & provision hosts. But for open source
we cannot assume hwm exists all the time, so we have to make it a
pluggable module before we can accept it. For now, just remove it.

Daisy should still be able to discover & provision hosts without
hwm.

Change-Id: I59d19718b0113f1c1d2ccedb968b8d594e90f670
Signed-off-by: Zhijiang Hu <hu.zhijiang@zte.com.cn>
Zhijiang Hu 2016-09-12 22:18:06 -04:00
parent 321a5d6e1f
commit 058fc901ae
48 changed files with 272 additions and 8586 deletions
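
The message above points at the eventual shape: host power control behind a small driver interface, so hwm can return later as an optional plugin. A hypothetical sketch of that interface in the codebase's Python 2 style (none of these class names exist in Daisy; the IPMI path would simply wrap the _set_boot_or_power_state() method kept in this diff):

import abc


class PowerDriver(object):
    # Hypothetical plugin interface; not part of this commit.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def set_boot(self, host_detail, boot_type):
        # boot_type is 'pxe' or 'disk', the two actions used in the diff
        pass

    @abc.abstractmethod
    def restart(self, host_detail):
        pass


class IpmiPowerDriver(PowerDriver):
    # Default, always-available driver: would delegate to
    # OSInstall._set_boot_or_power_state(), which requires
    # ipmi_user/ipmi_passwd/ipmi_addr on every host.
    def set_boot(self, host_detail, boot_type):
        pass

    def restart(self, host_detail):
        pass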

View File

@@ -40,6 +40,48 @@ kolla_backend_name = "kolla"
os_install_start_time = 0.0
# This is used for mapping daisy service id to systemctl service name
# Only used by non containerized deploy tools such as clush/puppet.
service_map = {
'lb': 'haproxy',
'mongodb': 'mongod',
'ha': '',
'mariadb': 'mariadb',
'amqp': 'rabbitmq-server',
'ceilometer-api': 'openstack-ceilometer-api',
'ceilometer-collector': 'openstack-ceilometer-collector,\
openstack-ceilometer-mend',
'ceilometer-central': 'openstack-ceilometer-central',
'ceilometer-notification': 'openstack-ceilometer-notification',
'ceilometer-alarm': 'openstack-ceilometer-alarm-evaluator,\
openstack-ceilometer-alarm-notifier',
'heat-api': 'openstack-heat-api',
'heat-api-cfn': 'openstack-heat-api-cfn',
'heat-engine': 'openstack-heat-engine',
'ironic': 'openstack-ironic-api,openstack-ironic-conductor',
'horizon': 'httpd,opencos-alarmmanager',
'keystone': 'openstack-keystone',
'glance': 'openstack-glance-api,openstack-glance-registry',
'cinder-volume': 'openstack-cinder-volume',
'cinder-scheduler': 'openstack-cinder-scheduler',
'cinder-api': 'openstack-cinder-api',
'neutron-metadata': 'neutron-metadata-agent',
'neutron-lbaas': 'neutron-lbaas-agent',
'neutron-dhcp': 'neutron-dhcp-agent',
'neutron-server': 'neutron-server',
'neutron-l3': 'neutron-l3-agent',
'compute': 'openstack-nova-compute',
'nova-cert': 'openstack-nova-cert',
'nova-sched': 'openstack-nova-scheduler',
'nova-vncproxy': 'openstack-nova-novncproxy,openstack-nova-consoleauth',
'nova-conductor': 'openstack-nova-conductor',
'nova-api': 'openstack-nova-api',
'nova-cells': 'openstack-nova-cells',
'camellia-api': 'camellia-api'
}
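# Example lookup (values from the map above; some components map to
# several systemctl units, comma-separated):
#   service_map['glance'].split(',')
#   -> ['openstack-glance-api', 'openstack-glance-registry']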
def subprocess_call(command, file=None):
if file:
return_code = subprocess.call(command,
@@ -366,3 +408,168 @@ def calc_host_iqn(min_mac):
get_uuid = stdoutput.split('=')[1]
iqn = "iqn.opencos.rh:" + get_uuid.strip()
return iqn
def _get_cluster_network(cluster_networks, network_type):
network = [cn for cn in cluster_networks if cn['name'] in network_type]
if not network or not network[0]:
msg = "network %s is not exist" % (network_type)
raise exception.InvalidNetworkConfig(msg)
else:
return network[0]
def get_host_interface_by_network(host_detail, network_type):
host_detail_info = copy.deepcopy(host_detail)
interface_list = [hi for hi in host_detail_info['interfaces']
for assigned_network in hi['assigned_networks']
if assigned_network and
network_type == assigned_network['name']]
interface = {}
if interface_list:
interface = interface_list[0]
if not interface:
msg = "network %s of host %s is not exist" % (
network_type, host_detail_info['id'])
raise exception.InvalidNetworkConfig(msg)
return interface
def get_host_network_ip(req, host_detail, cluster_networks, network_name):
interface_network_ip = ''
host_interface = get_host_interface_by_network(host_detail, network_name)
if host_interface:
network = _get_cluster_network(cluster_networks, network_name)
assigned_network = get_assigned_network(req,
host_interface['id'],
network['id'])
interface_network_ip = assigned_network['ip']
if not interface_network_ip and 'MANAGEMENT' == network_name:
msg = "%s network ip of host %s can't be empty" % (
network_name, host_detail['id'])
raise exception.InvalidNetworkConfig(msg)
return interface_network_ip
def get_service_disk_list(req, params):
try:
service_disks = registry.list_service_disk_metadata(
req.context, **params)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return service_disks
def sort_interfaces_by_pci(networks, host_detail):
"""
Sort interfaces by pci segment. If the interface type is bond,
use the pci of the first member nic. This function fixes a bug with
the name length of the ovs virtual port: if the name is longer than
15 characters, creating the port will fail.
:param interfaces: interfaces info of the host
:return:
"""
interfaces = eval(host_detail.get('interfaces', None)) \
if isinstance(host_detail, unicode) else \
host_detail.get('interfaces', None)
if not interfaces:
LOG.info("This host has no interfaces info.")
return host_detail
tmp_interfaces = copy.deepcopy(interfaces)
slaves_name_list = []
for interface in tmp_interfaces:
if interface.get('type', None) == "bond" and\
interface.get('slave1', None) and\
interface.get('slave2', None):
slaves_name_list.append(interface['slave1'])
slaves_name_list.append(interface['slave2'])
for interface in interfaces:
if interface.get('name') not in slaves_name_list:
vlan_id_len_list = [len(network['vlan_id'])
for assigned_network in interface.get(
'assigned_networks', [])
for network in networks
if assigned_network.get('name') ==
network.get('name') and network.get('vlan_id')]
max_vlan_id_len = max(vlan_id_len_list) if vlan_id_len_list else 0
interface_name_len = len(interface['name'])
redundant_bit = interface_name_len + max_vlan_id_len - 14
interface['name'] = interface['name'][
redundant_bit:] if redundant_bit > 0 else interface['name']
return host_detail
def run_scrip(script, ip=None, password=None, msg=None):
try:
_run_scrip(script, ip, password)
except Exception:
msg1 = 'Error occurred while running scripts.'
message = msg1 + msg if msg else msg1
LOG.error(message)
raise HTTPForbidden(explanation=message)
else:
LOG.info('Running scripts successfully!')
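# Hypothetical usage (IP and unit name are illustrative only):
#   run_scrip(['systemctl restart openstack-nova-api'],
#             ip='192.168.1.10', password='ossdbg1',
#             msg=' while restarting nova-api')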
def _run_scrip(script, ip=None, password=None):
mask_list = []
repl_list = [("'", "'\\''")]
script = "\n".join(script)
_PIPE = subprocess.PIPE
if ip:
cmd = ["sshpass", "-p", "%s" % password,
"ssh", "-o StrictHostKeyChecking=no",
"%s" % ip, "bash -x"]
else:
cmd = ["bash", "-x"]
environ = os.environ
environ['LANG'] = 'en_US.UTF8'
obj = subprocess.Popen(cmd, stdin=_PIPE, stdout=_PIPE, stderr=_PIPE,
close_fds=True, shell=False, env=environ)
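# The ERR trap below makes the first failing command abort the whole
# script and propagate its exit status back through bash.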
script = "function t(){ exit $? ; } \n trap t ERR \n" + script
out, err = obj.communicate(script)
masked_out = mask_string(out, mask_list, repl_list)
masked_err = mask_string(err, mask_list, repl_list)
if obj.returncode:
pattern = (r'^ssh\:')
if re.search(pattern, err):
LOG.error(_("Network error occured when run script."))
raise exception.NetworkError(masked_err, stdout=out, stderr=err)
else:
msg = ('Failed to run remote script, stdout: %s\nstderr: %s' %
(masked_out, masked_err))
LOG.error(msg)
raise exception.ScriptRuntimeError(msg, stdout=out, stderr=err)
return obj.returncode, out
def get_ctl_ha_nodes_min_mac(req, cluster_id):
'''
ctl_ha_nodes_min_mac = {'host_name1':'min_mac1', ...}
'''
ctl_ha_nodes_min_mac = {}
roles = get_cluster_roles_detail(req, cluster_id)
cluster_networks =\
get_cluster_networks_detail(req, cluster_id)
for role in roles:
if role['deployment_backend'] != tecs_backend_name:
continue
role_hosts = get_hosts_of_role(req, role['id'])
for role_host in role_hosts:
# hosts that already have tecs installed are excluded
if (role_host['status'] == TECS_STATE['ACTIVE'] or
role_host['status'] == TECS_STATE['UPDATING'] or
role_host['status'] == TECS_STATE['UPDATE_FAILED']):
continue
host_detail = get_host_detail(req,
role_host['host_id'])
host_name = host_detail['name']
if role['name'] == "CONTROLLER_HA":
min_mac = utils.get_host_min_mac(host_detail['interfaces'])
ctl_ha_nodes_min_mac[host_name] = min_mac
return ctl_ha_nodes_min_mac

View File

@@ -14,7 +14,7 @@
# under the License.
"""
/install endpoint for tecs API
/install endpoint for daisy API
"""
import copy
import subprocess
@@ -24,7 +24,6 @@ import json
from oslo_config import cfg
from oslo_log import log as logging
from webob.exc import HTTPBadRequest
import threading
from daisy import i18n
@@ -32,13 +31,9 @@ from daisy.common import exception
from daisy.api import common
from daisy.common import utils
import daisy.registry.client.v1.api as registry
from daisyclient.v1 import client as daisy_client
import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.tecs.common as tecs_cmn
import ConfigParser
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
@@ -72,17 +67,6 @@ LINUX_BOND_MODE = {'balance-rr': '0', 'active-backup': '1',
'802.3ad': '4', 'balance-tlb': '5',
'balance-alb': '6'}
daisy_tecs_path = tecs_cmn.daisy_tecs_path
def get_daisyclient():
"""Get Daisy client instance."""
config_daisy = ConfigParser.ConfigParser()
config_daisy.read("/etc/daisy/daisy-api.conf")
daisy_port = config_daisy.get("DEFAULT", "bind_port")
args = {'version': 1.0, 'endpoint': 'http://127.0.0.1:' + daisy_port}
return daisy_client.Client(**args)
def pxe_server_build(req, install_meta):
params = {'filters': {'type': 'system'}}
@@ -179,7 +163,7 @@ def _get_network_plat(req, host_config, cluster_networks, dhcp_mac):
alias.append(cluster_network['alias'])
# convert cidr to netmask
cidr_to_ip = ""
assigned_networks_ip = tecs_cmn.get_host_network_ip(
assigned_networks_ip = daisy_cmn.get_host_network_ip(
req, host_config_orig, cluster_networks, network_name)
if cluster_network.get('cidr', None):
inter_ip = lambda x: '.'.join(
@@ -243,7 +227,7 @@ def get_cluster_hosts_config(req, cluster_id):
role['name'] in host_detail['role'] and\
role['nova_lv_size']:
host_detail['nova_lv_size'] = role['nova_lv_size']
service_disks = tecs_cmn.get_service_disk_list(
service_disks = daisy_cmn.get_service_disk_list(
req, {'role_id': role['id']})
for service_disk in service_disks:
if service_disk['disk_location'] == 'local' and\
@@ -278,32 +262,11 @@ def get_cluster_hosts_config(req, cluster_id):
host_config = _get_network_plat(req, host_config_detail,
networks,
pxe_macs[0])
hosts_config.append(tecs_cmn.sort_interfaces_by_pci(networks,
host_config))
hosts_config.append(daisy_cmn.sort_interfaces_by_pci(networks,
host_config))
return hosts_config
def check_tfg_exist():
get_tfg_patch = "ls %s|grep CGSL_VPLAT-.*\.iso$" % daisy_tecs_path
obj = subprocess.Popen(get_tfg_patch,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdoutput, erroutput) = obj.communicate()
tfg_patch_pkg_file = ""
tfg_patch_pkg_name = ""
if stdoutput:
tfg_patch_pkg_name = stdoutput.split('\n')[0]
tfg_patch_pkg_file = daisy_tecs_path + tfg_patch_pkg_name
chmod_for_tfg_bin = 'chmod +x %s' % tfg_patch_pkg_file
daisy_cmn.subprocess_call(chmod_for_tfg_bin)
if not stdoutput or not tfg_patch_pkg_name:
LOG.info(_("no CGSL_VPLAT iso file got in %s" % daisy_tecs_path))
return ""
return tfg_patch_pkg_file
def update_db_host_status(req, host_id, host_status):
"""
Update host status and installation progress to db.
@@ -339,7 +302,6 @@ class OSInstall():
self.max_parallel_os_num = int(CONF.max_parallel_os_number)
self.cluster_hosts_install_timeout = (
self.max_parallel_os_num / 4 + 2) * 60 * (12 * self.time_step)
self.daisyclient = get_daisyclient()
def _set_boot_or_power_state(self, user, passwd, addr, action):
count = 0
@@ -449,25 +411,17 @@ class OSInstall():
hugepagesize = '1G'
# tfg_patch_pkg_file = check_tfg_exist()
if host_detail.get('hwm_id'):
host_hwm_meta = {
"hwm_ip": host_detail.get('hwm_ip'),
"hwm_id": host_detail.get('hwm_id'),
"boot_type": "pxe"
}
self.daisyclient.node.set_boot(**host_hwm_meta)
else:
if (not host_detail['ipmi_user'] or
not host_detail['ipmi_passwd'] or
not host_detail['ipmi_addr']):
self.message = "Invalid ipmi information configed for host %s"\
% host_detail['id']
raise exception.NotFound(message=self.message)
if (not host_detail['ipmi_user'] or
not host_detail['ipmi_passwd'] or
not host_detail['ipmi_addr']):
self.message = "Invalid ipmi information configed for host %s" \
% host_detail['id']
raise exception.NotFound(message=self.message)
self._set_boot_or_power_state(host_detail['ipmi_user'],
host_detail['ipmi_passwd'],
host_detail['ipmi_addr'],
'pxe')
self._set_boot_or_power_state(host_detail['ipmi_user'],
host_detail['ipmi_passwd'],
host_detail['ipmi_addr'],
'pxe')
kwargs = {'hostname': host_detail['name'],
'iso_path': os_version_file,
@@ -537,17 +491,10 @@ class OSInstall():
msg = "install os return failed for host %s" % host_detail['id']
raise exception.OSInstallFailed(message=msg)
if host_detail.get('hwm_id'):
host_hwm_meta = {
"hwm_ip": host_detail.get('hwm_ip'),
"hwm_id": host_detail.get('hwm_id')
}
self.daisyclient.node.restart(**host_hwm_meta)
else:
self._set_boot_or_power_state(host_detail['ipmi_user'],
host_detail['ipmi_passwd'],
host_detail['ipmi_addr'],
'reset')
self._set_boot_or_power_state(host_detail['ipmi_user'],
host_detail['ipmi_passwd'],
host_detail['ipmi_addr'],
'reset')
def _begin_install_os(self, hosts_detail):
# all hosts status is set to 'pre-install' before os installing
@@ -562,26 +509,15 @@ class OSInstall():
def _set_disk_start_mode(self, host_detail):
LOG.info(_("Set boot from disk for host %s" % (host_detail['id'])))
if host_detail.get('hwm_id'):
host_hwm_meta = {
"hwm_ip": host_detail.get('hwm_ip'),
"hwm_id": host_detail.get('hwm_id'),
"boot_type": "disk"
}
self.daisyclient.node.set_boot(**host_hwm_meta)
LOG.info(_("reboot host %s" % (host_detail['id'])))
host_hwm_meta.pop('boot_type')
self.daisyclient.node.restart(**host_hwm_meta)
else:
self._set_boot_or_power_state(host_detail['ipmi_user'],
host_detail['ipmi_passwd'],
host_detail['ipmi_addr'],
'disk')
LOG.info(_("reboot host %s" % (host_detail['id'])))
self._set_boot_or_power_state(host_detail['ipmi_user'],
host_detail['ipmi_passwd'],
host_detail['ipmi_addr'],
'reset')
self._set_boot_or_power_state(host_detail['ipmi_user'],
host_detail['ipmi_passwd'],
host_detail['ipmi_addr'],
'disk')
LOG.info(_("reboot host %s" % (host_detail['id'])))
self._set_boot_or_power_state(host_detail['ipmi_user'],
host_detail['ipmi_passwd'],
host_detail['ipmi_addr'],
'reset')
def _init_progress(self, host_detail, hosts_status):
host_id = host_detail['id']
@@ -742,190 +678,3 @@ class OSInstall():
else:
role_hosts_ids.remove(host_id)
return (hosts_detail, role_hosts_ids)
def _os_thread_bin(req, host_ip, host_id):
host_meta = {}
password = "ossdbg1"
LOG.info(_("Begin update os for host %s." % (host_ip)))
cmd = 'mkdir -p /var/log/daisy/daisy_update/'
daisy_cmn.subprocess_call(cmd)
var_log_path = "/var/log/daisy/daisy_update/%s_update_tfg.log" % host_ip
with open(var_log_path, "w+") as fp:
cmd = '/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, password)
daisy_cmn.subprocess_call(cmd, fp)
cmd = 'clush -S -w %s "mkdir -p /home/daisy_update/"' % (host_ip,)
daisy_cmn.subprocess_call(cmd, fp)
cmd = 'clush -S -b -w %s "rm -rf /home/daisy_update/*"' % (host_ip,)
daisy_cmn.subprocess_call(cmd, fp)
cmd = 'clush -S -w %s -c /var/lib/daisy/tecs/*CGSL_VPLAT*.iso\
/var/lib/daisy/tecs/tfg_upgrade.sh \
--dest=/home/daisy_update' % (
host_ip,)
daisy_cmn.subprocess_call(cmd, fp)
cmd = 'clush -S -w %s "chmod 777 /home/daisy_update/*"' % (host_ip,)
daisy_cmn.subprocess_call(cmd, fp)
host_meta['os_progress'] = 30
host_meta['os_status'] = host_os_status['UPDATING']
host_meta['messages'] = "os updating,copy iso successfully"
update_db_host_status(req, host_id, host_meta)
try:
exc_result = subprocess.check_output(
'clush -S -w %s "/home/daisy_update/tfg_upgrade.sh"' % (
host_ip,),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
if e.returncode == 255 and "reboot" in e.output.strip():
host_meta['os_progress'] = 100
host_meta['os_status'] = host_os_status['ACTIVE']
host_meta['messages'] = "upgrade tfg successfully,os reboot"
LOG.info(
_("Update tfg for %s successfully,os reboot!" % host_ip))
daisy_cmn.check_reboot_ping(host_ip)
else:
host_meta['os_progress'] = 0
host_meta['os_status'] = host_os_status['UPDATE_FAILED']
host_meta[
'messages'] =\
e.output.strip()[-400:-200].replace('\n', ' ')
LOG.error(_("Update tfg for %s failed!" % host_ip))
update_db_host_status(req, host_id, host_meta)
fp.write(e.output.strip())
else:
host_meta['os_progress'] = 100
host_meta['os_status'] = host_os_status['ACTIVE']
host_meta['messages'] = "upgrade tfg successfully"
update_db_host_status(req, host_id, host_meta)
LOG.info(_("Update os for %s successfully!" % host_ip))
fp.write(exc_result)
if "reboot" in exc_result:
daisy_cmn.check_reboot_ping(host_ip)
# this will re-raise all the exceptions of the thread to the log file
def os_thread_bin(req, host_ip, host_id):
try:
_os_thread_bin(req, host_ip, host_id)
except Exception as e:
LOG.exception(e.message)
raise exception.ThreadBinException(message=e.message)
def _get_host_os_version(host_ip, host_pwd='ossdbg1'):
version = ""
tfg_version_file = '/usr/sbin/tfg_showversion'
try:
subprocess.check_output("sshpass -p %s ssh -o StrictHostKeyChecking=no"
" %s test -f %s" % (host_pwd, host_ip,
tfg_version_file),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
LOG.info(_("Host %s os version is TFG" % host_ip))
return version
try:
process =\
subprocess.Popen(["sshpass", "-p", "%s" % host_pwd, "ssh",
"-o StrictHostKeyChecking=no", "%s" % host_ip,
'tfg_showversion'], shell=False,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
version = process.stdout.read().strip('\n')
except subprocess.CalledProcessError:
msg = _("Get host %s os version by subprocess failed!" % host_ip)
raise exception.SubprocessCmdFailed(message=msg)
if version:
LOG.info(_("Host %s os version is %s" % (host_ip, version)))
return version
else:
msg = _("Get host %s os version by tfg_showversion failed!" % host_ip)
LOG.error(msg)
raise exception.Invalid(message=msg)
def _cmp_os_version(new_os_file, old_os_version,
target_host_ip, password='ossdbg1'):
shell_file = '/usr/sbin/tfg_showversion'
if old_os_version:
try:
subprocess.check_output("test -f %s" % shell_file, shell=True,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
scripts = ["sshpass -p %s scp -r -o\
StrictHostKeyChecking=no %s:%s "
"/usr/sbin/" % (password, target_host_ip, shell_file)]
tecs_cmn.run_scrip(scripts)
cmp_script = "tfg_showversion %s %s" % (new_os_file, old_os_version)
try:
result = subprocess.check_output(cmp_script, shell=True,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
return -1
else:
if new_os_file.find("Mimosa") != -1:
return 0
else:
msg = _("Please use Mimosa os to upgrade instead of TFG")
LOG.error(msg)
raise exception.Forbidden(message=msg)
return result.find("yes")
def upgrade_os(req, hosts_list):
upgrade_hosts = []
max_parallel_os_upgrade_number = int(CONF.max_parallel_os_upgrade_number)
while hosts_list:
host_meta = {}
threads = []
if len(hosts_list) > max_parallel_os_upgrade_number:
upgrade_hosts = hosts_list[:max_parallel_os_upgrade_number]
hosts_list = hosts_list[max_parallel_os_upgrade_number:]
else:
upgrade_hosts = hosts_list
hosts_list = []
new_os_file = check_tfg_exist()
for host_info in upgrade_hosts:
host_id = host_info.keys()[0]
host_ip = host_info.values()[0]
host_detail = daisy_cmn.get_host_detail(req, host_id)
target_host_os = _get_host_os_version(
host_ip, host_detail['root_pwd'])
if _cmp_os_version(new_os_file, target_host_os, host_ip) != -1:
host_meta['os_progress'] = 10
host_meta['os_status'] = host_os_status['UPDATING']
host_meta['messages'] = "os updating,begin copy iso"
update_db_host_status(req, host_id, host_meta)
t = threading.Thread(target=os_thread_bin, args=(req, host_ip,
host_id))
t.setDaemon(True)
t.start()
threads.append(t)
else:
LOG.warn(_("new os version is lower than or equal to that of "
"host %s, don't need to upgrade!" % host_ip))
try:
for t in threads:
t.join()
except Exception:
LOG.warn(_("Join update thread %s failed!" % t))
else:
for host_info in upgrade_hosts:
update_failed_flag = False
host_id = host_info.keys()[0]
host_ip = host_info.values()[0]
host = registry.get_host_metadata(req.context, host_id)
if host['os_status'] == host_os_status['UPDATE_FAILED'] or\
host['os_status'] == host_os_status['INIT']:
update_failed_flag = True
raise exception.ThreadBinException(
"%s update tfg failed! %s" % (
host_ip, host['messages']))
if not update_failed_flag:
host_meta = {}
host_meta['os_progress'] = 100
host_meta['os_status'] = host_os_status['ACTIVE']
host_meta['messages'] = "upgrade tfg successfully"
update_db_host_status(req, host_id, host_meta)

View File

@@ -1,126 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/install endpoint for proton API
"""
from oslo_log import log as logging
import threading
from daisy import i18n
from daisy.common import exception
from daisy.api.backends import driver
import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.proton.common as proton_cmn
import daisy.api.backends.proton.install as instl
import daisy.api.backends.proton.uninstall as unstl
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
proton_state = proton_cmn.PROTON_STATE
class API(driver.DeploymentDriver):
"""
The hosts API is a RESTful web service for host data. The API
is as follows::
GET /hosts -- Returns a set of brief metadata about hosts
GET /hosts/detail -- Returns a set of detailed metadata about
hosts
HEAD /hosts/<ID> -- Return metadata about a host with id <ID>
GET /hosts/<ID> -- Return host data for host with id <ID>
POST /hosts -- Store host data and return metadata about the
newly-stored host
PUT /hosts/<ID> -- Update host metadata and/or upload host
data for a previously-reserved host
DELETE /hosts/<ID> -- Delete the host with id <ID>
"""
def __init__(self):
super(API, self).__init__()
return
def install(self, req, cluster_id):
"""
Install PROTON to a cluster.
cluster_id:cluster id
"""
proton_install_task = instl.ProtonInstallTask(req, cluster_id)
proton_install_task.start()
def _uninstall(self, req, role_id, threads):
try:
for t in threads:
t.setDaemon(True)
t.start()
LOG.info(_("uninstall threads have started,"
" please waiting...."))
for t in threads:
t.join()
except Exception:
LOG.warn(_("Join uninstall thread failed!"))
else:
uninstall_failed_flag = False
role = daisy_cmn.get_role_detail(req, role_id)
if role['progress'] == 100:
unstl.update_progress_to_db(
req, role_id, proton_state['UNINSTALL_FAILED'])
uninstall_failed_flag = True
return
if role['status'] == proton_state['UNINSTALL_FAILED']:
uninstall_failed_flag = True
return
if not uninstall_failed_flag:
LOG.info(_("all uninstall threads have done,"
" set role of proton status to 'init'!"))
unstl.update_progress_to_db(req, role_id,
proton_state['INIT'])
def uninstall(self, req, cluster_id):
"""
Uninstall PROTON to a cluster.
:raises HTTPBadRequest if x-install-cluster is missing
"""
(role_id, hosts_list) = proton_cmn.get_roles_and_hosts_list(req,
cluster_id)
if role_id:
if not hosts_list:
msg = _("there is no host in cluster %s") % cluster_id
raise exception.ThreadBinException(msg)
unstl.update_progress_to_db(req, role_id,
proton_state['UNINSTALLING'], 0.0)
uninstall_progress_percentage = \
round(1 * 1.0 / len(hosts_list), 2) * 100
threads = []
for host in hosts_list:
host_detail = proton_cmn.get_host_detail(req, host['host_id'])
t = threading.Thread(target=unstl.thread_bin,
args=(req,
host_detail['interfaces'][0]['ip'],
role_id,
uninstall_progress_percentage))
threads.append(t)
self._uninstall(req, role_id, threads)

View File

@@ -1,178 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/install endpoint for proton API
"""
import subprocess
from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from daisy import i18n
from daisy.common import exception
import daisy.registry.client.v1.api as registry
import daisy.api.backends.common as daisy_cmn
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
daisy_proton_path = '/var/lib/daisy/proton/'
PROTON_STATE = {
'INIT': 'init',
'INSTALLING': 'installing',
'ACTIVE': 'active',
'INSTALL_FAILED': 'install-failed',
'UNINSTALLING': 'uninstalling',
'UNINSTALL_FAILED': 'uninstall-failed',
'UPDATING': 'updating',
'UPDATE_FAILED': 'update-failed',
}
def get_host_detail(req, host_id):
try:
host_detail = registry.get_host_metadata(req.context, host_id)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return host_detail
def get_roles_detail(req):
try:
roles = registry.get_roles_detail(req.context)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return roles
def get_hosts_of_role(req, role_id):
try:
hosts = registry.get_role_host_metadata(req.context, role_id)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return hosts
def get_roles_and_hosts_list(req, cluster_id):
roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id)
for role in roles:
if role['deployment_backend'] == daisy_cmn.proton_backend_name:
role_hosts = get_hosts_of_role(req, role['id'])
return (role['id'], role_hosts)
def get_role_detail(req, role_id):
try:
role = registry.get_role_metadata(req.context, role_id)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return role
def check_and_get_proton_version(daisy_proton_path):
proton_version_pkg_name = ""
get_proton_version_pkg = "ls %s| grep ^ZXDTC-PROTON.*\.bin$" \
% daisy_proton_path
obj = subprocess.Popen(
get_proton_version_pkg, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdoutput, erroutput) = obj.communicate()
if stdoutput:
proton_version_pkg_name = stdoutput.split('\n')[0]
proton_version_pkg_file = daisy_proton_path + proton_version_pkg_name
chmod_for_proton_version = 'chmod +x %s' % proton_version_pkg_file
daisy_cmn.subprocess_call(chmod_for_proton_version)
return proton_version_pkg_name
class ProtonShellExector():
"""
Install proton bin.
"""
def __init__(self, mgt_ip, proton_version_name, task_type, rmc_ip=''):
self.task_type = task_type
self.mgt_ip = mgt_ip
self.proton_version_file = daisy_proton_path + proton_version_name
self.rmc_ip = rmc_ip
self.clush_cmd = ""
self.oper_type = {
'install': self._install_proton,
'uninstall': self._uninstall_proton
}
self.oper_shell = {
'CMD_SSHPASS_PRE': "sshpass -p ossdbg1 %(ssh_ip)s %(cmd)s",
'CMD_BIN_SCP':
"scp %(path)s root@%(ssh_ip)s:/home" %
{'path': self.proton_version_file, 'ssh_ip': mgt_ip},
'CMD_BIN_INSTALL': "sudo /home/%s install %s 7777" %
(proton_version_name, self.rmc_ip),
'CMD_BIN_UNINSTALL': "sudo /home/%s uninstall" %
proton_version_name,
'CMD_BIN_REMOVE': "sudo rm -rf /home/%s" % proton_version_name
}
self._execute()
def _install_proton(self):
self.clush_cmd = \
"%s;%s" % (
self.oper_shell['CMD_SSHPASS_PRE'] %
{"ssh_ip": "", "cmd": self.oper_shell['CMD_BIN_SCP']},
self.oper_shell['CMD_SSHPASS_PRE'] %
{
"ssh_ip": "ssh " + self.mgt_ip, "cmd":
self.oper_shell['CMD_BIN_INSTALL']
}
)
subprocess.check_output(self.clush_cmd, shell=True,
stderr=subprocess.STDOUT)
def _uninstall_proton(self):
self.clush_cmd = \
"%s;%s" % (
self.oper_shell['CMD_SSHPASS_PRE'] %
{"ssh_ip": "", "cmd": self.oper_shell['CMD_BIN_SCP']},
self.oper_shell['CMD_SSHPASS_PRE'] %
{
"ssh_ip": "ssh " + self.mgt_ip,
"cmd": self.oper_shell['CMD_BIN_UNINSTALL']
}
)
subprocess.check_output(self.clush_cmd, shell=True,
stderr=subprocess.STDOUT)
def _execute(self):
try:
if not self.task_type or not self.mgt_ip:
LOG.error(_("<<<ProtonShellExector::execute,"
" input params invalid!>>>"))
return
self.oper_type[self.task_type]()
except subprocess.CalledProcessError as e:
LOG.warn(_("<<<ProtonShellExector::execute:Execute command "
"failed! Reason:%s>>>" % e.output.strip()))
except Exception as e:
LOG.exception(_(e.message))
else:
LOG.info(_("<<<ProtonShellExector::execute:Execute command:%s,"
"successful!>>>" % self.clush_cmd))

View File

@@ -1,152 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/install endpoint for proton API
"""
from oslo_log import log as logging
from threading import Thread
from daisy import i18n
import daisy.api.v1
from daisy.common import exception
import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.proton.common as proton_cmn
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS
SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS
ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE
proton_state = proton_cmn.PROTON_STATE
daisy_proton_path = proton_cmn.daisy_proton_path
def get_proton_ip(req, role_hosts):
proton_ip_list = []
for role_host in role_hosts:
host_detail = proton_cmn.get_host_detail(req,
role_host['host_id'])
for interface in host_detail['interfaces']:
for network in interface['assigned_networks']:
if network.get("name") == "MANAGEMENT":
proton_ip_list.append(network.get("ip"))
return proton_ip_list
def get_proton_hosts(req, cluster_id):
all_roles = proton_cmn.get_roles_detail(req)
for role in all_roles:
if role['cluster_id'] == cluster_id and role['name'] == 'PROTON':
role_hosts = proton_cmn.get_hosts_of_role(req, role['id'])
return get_proton_ip(req, role_hosts)
def get_rmc_host(req, cluster_id):
return "10.43.211.63"
class ProtonInstallTask(Thread):
"""
Class for install proton bin.
"""
def __init__(self, req, cluster_id):
super(ProtonInstallTask, self).__init__()
self.req = req
self.cluster_id = cluster_id
self.progress = 0
self.message = ""
self.state = proton_state['INIT']
self.proton_ip_list = []
self.install_log_fp = None
self.last_line_num = 0
self.need_install = False
self.ping_times = 36
def _update_install_progress_to_db(self):
"""
Update progress of installation to db.
:return:
"""
roles = daisy_cmn.get_cluster_roles_detail(self.req, self.cluster_id)
for role in roles:
if role['deployment_backend'] != daisy_cmn.proton_backend_name:
continue
role_hosts = daisy_cmn.get_hosts_of_role(self.req, role['id'])
for role_host in role_hosts:
if role_host['status'] != proton_state['ACTIVE']:
self.need_install = True
role_host['status'] = self.state
daisy_cmn.update_role_host(self.req, role_host['id'],
role_host)
role['status'] = self.state
role['messages'] = self.message
daisy_cmn.update_role(self.req, role['id'], role)
def run(self):
try:
self._run()
except (exception.InstallException,
exception.NotFound,
exception.InstallTimeoutException) as e:
LOG.exception(e.message)
else:
self.progress = 100
self.state = proton_state['ACTIVE']
self.message = "Proton install successfully"
LOG.info(_("Install PROTON for cluster %s successfully." %
self.cluster_id))
finally:
self._update_install_progress_to_db()
def _run(self):
"""
Execute install file (.bin) in sync mode.
:return:
"""
if not self.cluster_id or not self.req:
raise exception.InstallException(
cluster_id=self.cluster_id, reason="invalid params.")
self.proton_ip_list = get_proton_hosts(self.req, self.cluster_id)
unreached_hosts = daisy_cmn.check_ping_hosts(self.proton_ip_list,
self.ping_times)
if unreached_hosts:
self.state = proton_state['INSTALL_FAILED']
self.message = "hosts %s ping failed" % unreached_hosts
raise exception.NotFound(message=self.message)
proton_version_name = \
proton_cmn.check_and_get_proton_version(daisy_proton_path)
if not proton_version_name:
self.state = proton_state['INSTALL_FAILED']
self.message = "PROTON version file not found in %s" % \
daisy_proton_path
raise exception.NotFound(message=self.message)
rmc_ip = get_rmc_host(self.req, self.cluster_id)
for proton_ip in self.proton_ip_list:
proton_cmn.ProtonShellExector(proton_ip, proton_version_name,
'install', rmc_ip)

View File

@@ -1,103 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/hosts endpoint for Daisy v1 API
"""
import subprocess
from oslo_log import log as logging
import threading
from daisy import i18n
import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.proton.common as proton_cmn
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
proton_state = proton_cmn.PROTON_STATE
daisy_proton_path = proton_cmn.daisy_proton_path
# uninstall initial progress is 100; when uninstall succeeds,
# uninstall progress is 0, and the web display reverts the progress
uninstall_proton_progress = 100.0
uninstall_mutex = threading.Lock()
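# e.g. for a role with 4 hosts, update_progress_to_db() below is called
# with progress_percentage_step = round(1.0 / 4, 2) * 100 = 25.0 per
# finished host, so uninstall_proton_progress falls 100 -> 75 -> 50
# -> 25 -> 0 as uninstalls complete.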
def update_progress_to_db(req, role_id, status, progress_percentage_step=0.0):
"""
Write uninstall progress and status to db. We use the global lock
object 'uninstall_mutex' to make sure this function is thread safe.
:param req: http req.
:param role_id: Role to be updated in the role table.
:param status: Uninstall status.
:return:
"""
global uninstall_mutex
global uninstall_proton_progress
uninstall_mutex.acquire(True)
uninstall_proton_progress -= progress_percentage_step
role = {}
role_hosts = daisy_cmn.get_hosts_of_role(req, role_id)
if status == proton_state['UNINSTALLING']:
role['status'] = status
role['progress'] = uninstall_proton_progress
role['messages'] = 'Proton uninstalling'
for role_host in role_hosts:
role_host_meta = dict()
role_host_meta['status'] = status
role_host_meta['progress'] = uninstall_proton_progress
daisy_cmn.update_role_host(req, role_host['id'], role_host_meta)
if status == proton_state['UNINSTALL_FAILED']:
role['status'] = status
role['messages'] = 'Uninstall-failed'
for role_host in role_hosts:
role_host_meta = dict()
role_host_meta['status'] = status
daisy_cmn.update_role_host(req, role_host['id'], role_host_meta)
elif status == proton_state['INIT']:
role['status'] = status
role['progress'] = 0
role['messages'] = 'Proton uninstall successfully'
daisy_cmn.delete_role_hosts(req, role_id)
daisy_cmn.update_role(req, role_id, role)
uninstall_mutex.release()
def _thread_bin(req, host_ip, role_id, uninstall_progress_percentage):
try:
proton_version_name = \
proton_cmn.check_and_get_proton_version(daisy_proton_path)
proton_cmn.ProtonShellExector(host_ip, proton_version_name,
'uninstall')
except subprocess.CalledProcessError:
update_progress_to_db(req, role_id, proton_state['UNINSTALL_FAILED'])
LOG.info(_("Uninstall PROTON for %s failed!" % host_ip))
else:
update_progress_to_db(req, role_id, proton_state['UNINSTALLING'],
uninstall_progress_percentage)
LOG.info(_("Uninstall PROTON for %s successfully!" % host_ip))
def thread_bin(req, host_ip, role_id, uninstall_progress_percentage):
try:
_thread_bin(req, host_ip, role_id, uninstall_progress_percentage)
except Exception as e:
LOG.exception(e.message)

View File

@@ -1,427 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/install endpoint for tecs API
"""
import subprocess
import commands
from oslo_config import cfg
from oslo_log import log as logging
from webob.exc import HTTPBadRequest
import threading
from daisy import i18n
from daisy.common import exception
from daisy.api.backends.tecs import config
from daisy.api.backends import driver
import daisy.api.backends.os as os_handle
import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.tecs.common as tecs_cmn
import daisy.api.backends.tecs.install as instl
import daisy.api.backends.tecs.uninstall as unstl
import daisy.api.backends.tecs.upgrade as upgrd
import daisy.api.backends.tecs.disk_array as disk_array
from daisy.api.backends.tecs import write_configs
import daisy.registry.client.v1.api as registry
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
CONF = cfg.CONF
upgrade_opts = [
cfg.StrOpt('max_parallel_os_upgrade_number', default=10,
help='Maximum number of hosts upgrading os at the same time.'),
]
CONF.register_opts(upgrade_opts)
tecs_state = tecs_cmn.TECS_STATE
daisy_tecs_path = tecs_cmn.daisy_tecs_path
class API(driver.DeploymentDriver):
"""
The hosts API is a RESTful web service for host data. The API
is as follows::
GET /hosts -- Returns a set of brief metadata about hosts
GET /hosts/detail -- Returns a set of detailed metadata about
hosts
HEAD /hosts/<ID> -- Return metadata about a host with id <ID>
GET /hosts/<ID> -- Return host data for host with id <ID>
POST /hosts -- Store host data and return metadata about the
newly-stored host
PUT /hosts/<ID> -- Update host metadata and/or upload host
data for a previously-reserved host
DELETE /hosts/<ID> -- Delete the host with id <ID>
"""
def __init__(self):
super(API, self).__init__()
return
def install(self, req, cluster_id):
"""
Install TECS to a cluster.
param req: The WSGI/Webob Request object
cluster_id:cluster id
"""
write_configs.update_configset(req, cluster_id)
tecs_install_task = instl.TECSInstallTask(req, cluster_id)
tecs_install_task.start()
def _get_roles_and_hosts_ip_list(self, req, cluster_id):
role_host_ips = {'ha': set(), 'lb': set(), 'all': set()}
role_id_list = set()
hosts_id_list = []
hosts_list = []
tecs_install_failed_list = set()
roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id)
cluster_networks = daisy_cmn.get_cluster_networks_detail(
req, cluster_id)
for role in roles:
if role['deployment_backend'] != daisy_cmn.tecs_backend_name:
continue
role_hosts = daisy_cmn.get_hosts_of_role(req, role['id'])
if role_hosts:
for role_host in role_hosts:
host = daisy_cmn.get_host_detail(req, role_host['host_id'])
host_ip = tecs_cmn.get_host_network_ip(
req, host, cluster_networks, 'MANAGEMENT')
if role['name'] == "CONTROLLER_HA":
role_host_ips['ha'].add(host_ip)
if role['name'] == "CONTROLLER_LB":
role_host_ips['lb'].add(host_ip)
role_host_ips['all'].add(host_ip)
hosts_id_list.append({host['id']: host_ip})
if role_host['status'] == tecs_state['INSTALL_FAILED']:
tecs_install_failed_list.add(host_ip)
role_id_list.add(role['id'])
for host in hosts_id_list:
if host not in hosts_list:
hosts_list.append(host)
return (role_id_list, role_host_ips,
hosts_list, tecs_install_failed_list)
def _query_progress(self, req, cluster_id, action=""):
nodes_list = []
roles = daisy_cmn.get_roles_detail(req)
(role_id_list, role_host_ips, hosts_list, tecs_install_failed_list) =\
self._get_roles_and_hosts_ip_list(req, cluster_id)
for host in hosts_list:
node = {}
host_id = host.keys()[0]
host = daisy_cmn.get_host_detail(req, host_id)
node['id'] = host['id']
node['name'] = host['name']
if 0 == cmp("upgrade", action):
node['os-progress'] = host['os_progress']
node['os-status'] = host['os_status']
node['os-messages'] = host['messages']
if host['status'] == "with-role":
host_roles = [role for role in roles if role['name'] in host[
'role'] and role['cluster_id'] == cluster_id]
if host_roles:
node['role-status'] = host_roles[0]['status']
node['role-progress'] = str(host_roles[0]['progress'])
# node['role-message'] = host_roles[0]['messages']
nodes_list.append(node)
if nodes_list:
return {'tecs_nodes': nodes_list}
else:
return {'tecs_nodes': "TECS uninstalled successfully,\
the host has been removed from the host_roles table"}
def _modify_running_version_of_configs(self, req,
running_version, cluster_id):
cluster_configs_list = daisy_cmn.get_cluster_configs_list(req,
cluster_id)
if cluster_configs_list:
for cluster_config in cluster_configs_list:
registry.update_config_metadata(req.context,
cluster_config['id'],
{'running_version':
running_version})
def uninstall(self, req, cluster_id):
"""
Uninstall TECS to a cluster.
:param req: The WSGI/Webob Request object
:raises HTTPBadRequest if x-install-cluster is missing
"""
(role_id_list, role_host_ips, hosts_list, tecs_install_failed_list) =\
self._get_roles_and_hosts_ip_list(req, cluster_id)
if role_id_list:
if not role_host_ips['all']:
msg = _("there is no host in cluster %s") % cluster_id
raise exception.ThreadBinException(msg)
unstl.update_progress_to_db(
req, role_id_list, tecs_state['UNINSTALLING'], hosts_list)
threads = []
for host_ip in role_host_ips['all']:
t = threading.Thread(
target=unstl.thread_bin, args=(req, host_ip, role_id_list,
hosts_list))
t.setDaemon(True)
t.start()
threads.append(t)
LOG.info(_("Uninstall threads have started, please waiting...."))
try:
for t in threads:
t.join()
except Exception:
LOG.warn(_("Join uninstall thread %s failed!" % t))
else:
uninstall_failed_flag = False
for role_id in role_id_list:
role_hosts = daisy_cmn.get_hosts_of_role(req, role_id)
for role_host in role_hosts:
if role_host['status'] ==\
tecs_state['UNINSTALL_FAILED']:
unstl.update_progress_to_db(
req, role_id_list, tecs_state[
'UNINSTALL_FAILED'], hosts_list)
uninstall_failed_flag = True
break
if not uninstall_failed_flag:
LOG.info(
_("All uninstall threads have done,\
set all roles status to 'init'!"))
unstl.update_progress_to_db(
req, role_id_list, tecs_state['INIT'], hosts_list)
LOG.info(_("modify the running_version of configs to 0"))
running_version = 0
self._modify_running_version_of_configs(
req, running_version, cluster_id)
tecs_cmn.inform_provider_cloud_state(req.context, cluster_id,
operation='delete')
try:
(status, output) = commands.getstatusoutput('rpm -e --nodeps openstack-packstack\
openstack-packstack-puppet \
openstack-puppet-modules puppet')
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
def uninstall_progress(self, req, cluster_id):
return self._query_progress(req, cluster_id, "uninstall")
def upgrade(self, req, cluster_id):
"""
update TECS to a cluster.
:param req: The WSGI/Webob Request object
:raises HTTPBadRequest if x-install-cluster is missing
"""
# daisy_update_path = '/home/daisy_update/'
(role_id_list, role_host_ips, hosts_list, tecs_install_failed_list) =\
self._get_roles_and_hosts_ip_list(req, cluster_id)
if role_id_list:
if not role_host_ips['all']:
msg = _("there is no host in cluster %s") % cluster_id
raise exception.ThreadBinException(msg)
unreached_hosts = daisy_cmn.check_ping_hosts(
role_host_ips['all'], 1)
if unreached_hosts:
self.message = "hosts %s ping failed" % unreached_hosts
raise exception.NotFound(message=self.message)
daisy_cmn.subprocess_call('rm -rf /root/.ssh/known_hosts')
if os_handle.check_tfg_exist():
os_handle.upgrade_os(req, hosts_list)
unreached_hosts = daisy_cmn.check_ping_hosts(
role_host_ips['all'], 30)
if unreached_hosts:
self.message = "hosts %s ping failed after tfg upgrade" \
% unreached_hosts
raise exception.NotFound(message=self.message)
# check and get TECS version
tecs_version_pkg_file = tecs_cmn.check_and_get_tecs_version(
tecs_cmn.daisy_tecs_path)
if not tecs_version_pkg_file:
self.state = tecs_state['UPDATE_FAILED']
self.message = "TECS version file not found in %s"\
% tecs_cmn.daisy_tecs_path
raise exception.NotFound(message=self.message)
threads = []
LOG.info(
_("Begin to update TECS controller nodes, please waiting...."))
upgrd.update_progress_to_db(
req, role_id_list, tecs_state['UPDATING'], hosts_list)
for host_ip in role_host_ips['ha']:
if host_ip in tecs_install_failed_list:
continue
LOG.info(_("Update TECS controller node %s..." % host_ip))
rc = upgrd.thread_bin(req, role_id_list, host_ip, hosts_list)
if rc == 0:
LOG.info(_("Update TECS for %s successfully" % host_ip))
else:
LOG.info(
_("Update TECS failed for %s, return %s"
% (host_ip, rc)))
return
LOG.info(_("Begin to update TECS other nodes, please waiting...."))
max_parallel_upgrade_number = int(
CONF.max_parallel_os_upgrade_number)
compute_ip_list = list(role_host_ips[
'all'] - role_host_ips['ha'] - tecs_install_failed_list)
while compute_ip_list:
threads = []
if len(compute_ip_list) > max_parallel_upgrade_number:
upgrade_hosts = compute_ip_list[
:max_parallel_upgrade_number]
compute_ip_list = compute_ip_list[
max_parallel_upgrade_number:]
else:
upgrade_hosts = compute_ip_list
compute_ip_list = []
for host_ip in upgrade_hosts:
t = threading.Thread(
target=upgrd.thread_bin,
args=(req, role_id_list, host_ip, hosts_list))
t.setDaemon(True)
t.start()
threads.append(t)
try:
for t in threads:
t.join()
except Exception:
LOG.warn(_("Join update thread %s failed!" % t))
for role_id in role_id_list:
role_hosts = daisy_cmn.get_hosts_of_role(req, role_id)
for role_host in role_hosts:
if (role_host['status'] == tecs_state['UPDATE_FAILED'] or
role_host['status'] == tecs_state['UPDATING']):
role_id = [role_host['role_id']]
upgrd.update_progress_to_db(req,
role_id,
tecs_state[
'UPDATE_FAILED'],
hosts_list)
break
elif role_host['status'] == tecs_state['ACTIVE']:
role_id = [role_host['role_id']]
upgrd.update_progress_to_db(req,
role_id,
tecs_state['ACTIVE'],
hosts_list)
def upgrade_progress(self, req, cluster_id):
return self._query_progress(req, cluster_id, "upgrade")
def export_db(self, req, cluster_id):
"""
Export daisy db data to tecs.conf and HA.conf.
:param req: The WSGI/Webob Request object
:raises HTTPBadRequest if x-install-cluster is missing
"""
tecs_config =\
instl.get_cluster_tecs_config(req, cluster_id)
config_files = {'tecs_conf': '', 'ha_conf': ''}
tecs_install_path = "/home/tecs_install"
if tecs_config:
cluster_conf_path = tecs_install_path + "/" + cluster_id
create_cluster_conf_path =\
"rm -rf %s;mkdir %s" % (cluster_conf_path, cluster_conf_path)
daisy_cmn.subprocess_call(create_cluster_conf_path)
config.update_tecs_config(tecs_config, cluster_conf_path)
get_tecs_conf = "ls %s|grep tecs.conf" % cluster_conf_path
obj = subprocess.Popen(get_tecs_conf,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdoutput, erroutput) = obj.communicate()
tecs_conf_file = ""
if stdoutput:
tecs_conf_file = stdoutput.split('\n')[0]
config_files['tecs_conf'] =\
cluster_conf_path + "/" + tecs_conf_file
get_ha_conf_cmd = "ls %s|grep HA_1.conf" % cluster_conf_path
obj = subprocess.Popen(get_ha_conf_cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdoutput, erroutput) = obj.communicate()
ha_conf_file = ""
if stdoutput:
ha_conf_file = stdoutput.split('\n')[0]
config_files['ha_conf'] =\
cluster_conf_path + "/" + ha_conf_file
else:
LOG.info(_("No TECS config files generated."))
return config_files
def update_disk_array(self, req, cluster_id):
(share_disk_info, volume_disk_info) =\
disk_array.get_disk_array_info(req, cluster_id)
array_nodes_addr =\
tecs_cmn.get_disk_array_nodes_addr(req, cluster_id)
ha_nodes_ip = array_nodes_addr['ha'].keys()
all_nodes_ip = list(array_nodes_addr['computer']) + ha_nodes_ip
if all_nodes_ip:
compute_error_msg =\
disk_array.config_compute_multipath(all_nodes_ip)
if compute_error_msg:
return compute_error_msg
else:
LOG.info(_("Config Disk Array multipath successfully"))
if share_disk_info:
ha_error_msg =\
disk_array.config_ha_share_disk(share_disk_info,
array_nodes_addr['ha'])
if ha_error_msg:
return ha_error_msg
else:
LOG.info(_("Config Disk Array for HA nodes successfully"))
if volume_disk_info:
cinder_error_msg =\
disk_array.config_ha_cinder_volume(volume_disk_info,
ha_nodes_ip)
if cinder_error_msg:
return cinder_error_msg
else:
LOG.info(_("Config cinder volume for HA nodes successfully"))
return 'update successfully'

View File

@@ -1,496 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/install endpoint for tecs API
"""
import os
import copy
import subprocess
import re
from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPForbidden
from daisy import i18n
from daisy.common import utils
from daisy.common import exception
import daisy.registry.client.v1.api as registry
import daisy.api.backends.common as daisy_cmn
from daisyclient.v1 import client as daisy_client
import ConfigParser
STR_MASK = '*' * 8
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
daisy_tecs_path = '/var/lib/daisy/tecs/'
tecs_install_path = '/home/tecs_install'
TECS_STATE = {
'INIT': 'init',
'INSTALLING': 'installing',
'ACTIVE': 'active',
'INSTALL_FAILED': 'install-failed',
'UNINSTALLING': 'uninstalling',
'UNINSTALL_FAILED': 'uninstall-failed',
'UPDATING': 'updating',
'UPDATE_FAILED': 'update-failed',
}
def get_daisyclient():
"""Get Daisy client instance."""
config_daisy = ConfigParser.ConfigParser()
config_daisy.read("/etc/daisy/daisy-api.conf")
daisy_port = config_daisy.get("DEFAULT", "bind_port")
args = {'version': 1.0, 'endpoint': 'http://127.0.0.1:' + daisy_port}
return daisy_client.Client(**args)
def mkdir_tecs_install(host_ips=None):
if not host_ips:
cmd = "mkdir -p %s" % tecs_install_path
daisy_cmn.subprocess_call(cmd)
return
for host_ip in host_ips:
cmd = 'clush -S -w %s "mkdir -p %s"' % (host_ip, tecs_install_path)
daisy_cmn.subprocess_call(cmd)
def _get_cluster_network(cluster_networks, network_name):
network = [cn for cn in cluster_networks if cn['name'] == network_name]
if not network or not network[0]:
msg = "network %s is not exist" % (network_name)
raise exception.InvalidNetworkConfig(msg)
else:
return network[0]
def get_host_interface_by_network(host_detail, network_name):
host_detail_info = copy.deepcopy(host_detail)
interface_list = [hi for hi in host_detail_info['interfaces']
for assigned_network in hi['assigned_networks']
if assigned_network and
network_name == assigned_network['name']]
interface = {}
if interface_list:
interface = interface_list[0]
if not interface and 'MANAGEMENT' == network_name:
msg = "network %s of host %s is not exist" % (
network_name, host_detail_info['id'])
raise exception.InvalidNetworkConfig(msg)
return interface
def get_host_network_ip(req, host_detail, cluster_networks, network_name):
interface_network_ip = ''
host_interface = get_host_interface_by_network(host_detail, network_name)
if host_interface:
network = _get_cluster_network(cluster_networks, network_name)
assigned_network = daisy_cmn.get_assigned_network(req,
host_interface['id'],
network['id'])
interface_network_ip = assigned_network['ip']
if not interface_network_ip and 'MANAGEMENT' == network_name:
msg = "%s network ip of host %s can't be empty" % (
network_name, host_detail['id'])
raise exception.InvalidNetworkConfig(msg)
return interface_network_ip
def get_storage_name_ip_dict(req, cluster_id, network_type):
name_ip_list = []
ip_list = []
roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id)
cluster_networks = daisy_cmn.get_cluster_networks_detail(req, cluster_id)
networks_list = [network for network in cluster_networks
if network['network_type'] == network_type]
networks_name_list = [network['name'] for network in networks_list]
for role in roles:
role_hosts = daisy_cmn.get_hosts_of_role(req, role['id'])
for role_host in role_hosts:
host_detail = daisy_cmn.get_host_detail(req, role_host['host_id'])
for network_name in networks_name_list:
ip = get_host_network_ip(req, host_detail, cluster_networks,
network_name)
name_ip_dict = {}
if ip and ip not in ip_list:
ip_list.append(ip)
name_ip_dict.update({host_detail['name'] + '.' +
network_name: ip})
name_ip_list.append(name_ip_dict)
return name_ip_list
def get_network_netmask(cluster_networks, network_name):
network = _get_cluster_network(cluster_networks, network_name)
cidr = network['cidr']
if not cidr:
msg = "cidr of network %s is not exist" % (network_name)
raise exception.InvalidNetworkConfig(msg)
netmask = daisy_cmn.cidr_to_netmask(cidr)
if not netmask:
msg = "netmask of network %s is not exist" % (network_name)
raise exception.InvalidNetworkConfig(msg)
return netmask
# every host only has one gateway
def get_network_gateway(cluster_networks, network_name):
network = _get_cluster_network(cluster_networks, network_name)
gateway = network['gateway']
return gateway
def get_network_cidr(cluster_networks, network_name):
network = _get_cluster_network(cluster_networks, network_name)
cidr = network['cidr']
if not cidr:
msg = "cidr of network %s is not exist" % (network_name)
raise exception.InvalidNetworkConfig(msg)
return cidr
def get_mngt_network_vlan_id(cluster_networks):
mgnt_vlan_id = ""
management_network = [network for network in cluster_networks if network[
'network_type'] == 'MANAGEMENT']
if (not management_network or
not management_network[0] or
# not management_network[0].has_key('vlan_id')):
'vlan_id' not in management_network[0]):
msg = "can't get management network vlan id"
raise exception.InvalidNetworkConfig(msg)
else:
mgnt_vlan_id = management_network[0]['vlan_id']
return mgnt_vlan_id
def get_network_vlan_id(cluster_networks, network_type):
vlan_id = ""
general_network = [network for network in cluster_networks
if network['network_type'] == network_type]
if (not general_network or not general_network[0] or
# not general_network[0].has_key('vlan_id')):
'vlan_id' not in general_network[0]):
msg = "can't get %s network vlan id" % network_type
raise exception.InvalidNetworkConfig(msg)
else:
vlan_id = general_network[0]['vlan_id']
return vlan_id
def sort_interfaces_by_pci(networks, host_detail):
"""
Sort interfaces by pci segment. If the interface type is bond,
use the pci of the first member nic. This function fixes a bug with
the name length of the ovs virtual port: if the name is longer than
15 characters, creating the port will fail.
:param interfaces: interfaces info of the host
:return:
"""
interfaces = eval(host_detail.get('interfaces', None)) \
if isinstance(host_detail, unicode) else \
host_detail.get('interfaces', None)
if not interfaces:
LOG.info("This host has no interfaces info.")
return host_detail
tmp_interfaces = copy.deepcopy(interfaces)
slaves_name_list = []
for interface in tmp_interfaces:
if interface.get('type', None) == "bond" and\
interface.get('slave1', None) and\
interface.get('slave2', None):
slaves_name_list.append(interface['slave1'])
slaves_name_list.append(interface['slave2'])
for interface in interfaces:
if interface.get('name') not in slaves_name_list:
vlan_id_len_list = [len(network['vlan_id'])
for assigned_network in interface.get(
'assigned_networks', [])
for network in networks
if assigned_network.get('name') ==
network.get('name') and network.get('vlan_id')]
max_vlan_id_len = max(vlan_id_len_list) if vlan_id_len_list else 0
interface_name_len = len(interface['name'])
redundant_bit = interface_name_len + max_vlan_id_len - 14
interface['name'] = interface['name'][
redundant_bit:] if redundant_bit > 0 else interface['name']
return host_detail
def check_and_get_tecs_version(daisy_tecs_pkg_path):
tecs_version_pkg_file = ""
get_tecs_version_pkg = "ls %s| grep ^ZXTECS.*\.bin$" % daisy_tecs_pkg_path
obj = subprocess.Popen(get_tecs_version_pkg,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdoutput, erroutput) = obj.communicate()
if stdoutput:
tecs_version_pkg_name = stdoutput.split('\n')[0]
tecs_version_pkg_file = daisy_tecs_pkg_path + tecs_version_pkg_name
chmod_for_tecs_version = 'chmod +x %s' % tecs_version_pkg_file
daisy_cmn.subprocess_call(chmod_for_tecs_version)
return tecs_version_pkg_file
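# Illustrative example (hypothetical package name): if daisy_tecs_pkg_path
# '/var/lib/daisy/tecs/' contains 'ZXTECS_V02.16.20.bin', this returns
# '/var/lib/daisy/tecs/ZXTECS_V02.16.20.bin' after making it executable.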
def get_service_disk_list(req, params):
try:
service_disks = registry.list_service_disk_metadata(
req.context, **params)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return service_disks
def get_cinder_volume_list(req, params):
try:
cinder_volumes = registry.list_cinder_volume_metadata(
req.context, **params)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return cinder_volumes
def mask_string(unmasked, mask_list=None, replace_list=None):
"""
Replaces words from mask_list with MASK in unmasked string.
If words are needed to be transformed before masking, transformation
could be describe in replace list. For example [("'","'\\''")]
replaces all ' characters with '\\''.
"""
mask_list = mask_list or []
replace_list = replace_list or []
masked = unmasked
for word in sorted(mask_list, lambda x, y: len(y) - len(x)):
if not word:
continue
for before, after in replace_list:
word = word.replace(before, after)
masked = masked.replace(word, STR_MASK)
return masked
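# Illustrative usage (hypothetical secret; STR_MASK is the masking token
# defined elsewhere in this module):
#   mask_string("mysql -p 's3cret'", mask_list=["s3cret"],
#               replace_list=[("'", "'\\''")])
# applies the replace_list transform to each mask word, then substitutes
# STR_MASK for it in the output.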
def run_scrip(script, ip=None, password=None, msg=None):
try:
_run_scrip(script, ip, password)
except Exception:
msg1 = 'Error occurred during running scripts.'
message = msg1 + msg if msg else msg1
LOG.error(message)
raise HTTPForbidden(explanation=message)
else:
LOG.info('Running scripts successfully!')
def _run_scrip(script, ip=None, password=None):
mask_list = []
repl_list = [("'", "'\\''")]
script = "\n".join(script)
_PIPE = subprocess.PIPE
if ip:
cmd = ["sshpass", "-p", "%s" % password,
"ssh", "-o StrictHostKeyChecking=no",
"%s" % ip, "bash -x"]
else:
cmd = ["bash", "-x"]
environ = os.environ
environ['LANG'] = 'en_US.UTF8'
obj = subprocess.Popen(cmd, stdin=_PIPE, stdout=_PIPE, stderr=_PIPE,
close_fds=True, shell=False, env=environ)
script = "function t(){ exit $? ; } \n trap t ERR \n" + script
out, err = obj.communicate(script)
masked_out = mask_string(out, mask_list, repl_list)
masked_err = mask_string(err, mask_list, repl_list)
if obj.returncode:
pattern = (r'^ssh\:')
if re.search(pattern, err):
LOG.error(_("Network error occured when run script."))
raise exception.NetworkError(masked_err, stdout=out, stderr=err)
else:
msg = ('Failed to run remote script, stdout: %s\nstderr: %s' %
(masked_out, masked_err))
LOG.error(msg)
raise exception.ScriptRuntimeError(msg, stdout=out, stderr=err)
return obj.returncode, out
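# Illustrative usage (hypothetical host and password):
#   rc, out = _run_scrip(["echo hello", "hostname"],
#                        ip="10.0.0.5", password="ossdbg1")
# The script lines are joined, prefixed with an ERR trap, and piped to
# "bash -x" on the remote host via sshpass; without ip they run locally.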
def inform_provider_cloud_state(context, cluster_id, **kwargs):
params = dict()
daisyclient = get_daisyclient()
cluster = registry.get_cluster_metadata(context, cluster_id)
params['operation'] = kwargs.get('operation')
params['name'] = cluster.get('name')
params['url'] = "http://" + cluster.get('public_vip')
params['provider_ip'] = cluster.get('hwm_ip')
daisyclient.node.cloud_state(**params)
def get_disk_array_nodes_addr(req, cluster_id):
controller_ha_nodes = {}
computer_ips = set()
roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id)
cluster_networks =\
daisy_cmn.get_cluster_networks_detail(req, cluster_id)
for role in roles:
if role['deployment_backend'] != daisy_cmn.tecs_backend_name:
continue
role_hosts = daisy_cmn.get_hosts_of_role(req, role['id'])
for role_host in role_hosts:
# hosts that already have TECS installed are excluded
if (role_host['status'] == TECS_STATE['ACTIVE'] or
role_host['status'] == TECS_STATE['UPDATING'] or
role_host['status'] == TECS_STATE['UPDATE_FAILED']):
continue
host_detail = daisy_cmn.get_host_detail(req,
role_host['host_id'])
host_ip = get_host_network_ip(req,
host_detail,
cluster_networks,
'MANAGEMENT')
if role['name'] == "CONTROLLER_HA":
min_mac = utils.get_host_min_mac(host_detail['interfaces'])
controller_ha_nodes[host_ip] = min_mac
if role['name'] == "COMPUTER":
computer_ips.add(host_ip)
return {'ha': controller_ha_nodes, 'computer': computer_ips}
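# Illustrative return value (hypothetical addresses):
#   {'ha': {'10.0.0.5': '00:0c:29:aa:bb:01'},
#    'computer': set(['10.0.0.6'])}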
def get_ctl_ha_nodes_min_mac(req, cluster_id):
'''
ctl_ha_nodes_min_mac = {'host_name1':'min_mac1', ...}
'''
ctl_ha_nodes_min_mac = {}
roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id)
cluster_networks =\
daisy_cmn.get_cluster_networks_detail(req, cluster_id)
for role in roles:
if role['deployment_backend'] != daisy_cmn.tecs_backend_name:
continue
role_hosts = daisy_cmn.get_hosts_of_role(req, role['id'])
for role_host in role_hosts:
# hosts that already have TECS installed are excluded
if (role_host['status'] == TECS_STATE['ACTIVE'] or
role_host['status'] == TECS_STATE['UPDATING'] or
role_host['status'] == TECS_STATE['UPDATE_FAILED']):
continue
host_detail = daisy_cmn.get_host_detail(req,
role_host['host_id'])
host_name = host_detail['name']
if role['name'] == "CONTROLLER_HA":
min_mac = utils.get_host_min_mac(host_detail['interfaces'])
ctl_ha_nodes_min_mac[host_name] = min_mac
return ctl_ha_nodes_min_mac
class TecsShellExector(object):
"""
Configures tasks before installing the TECS bin package.
"""
def __init__(self, mgnt_ip, task_type, params={}):
self.task_type = task_type
self.mgnt_ip = mgnt_ip
self.params = params
self.clush_cmd = ""
self.rpm_name =\
daisy_cmn.get_rpm_package_by_name(daisy_tecs_path,
'network-configuration')
self.NETCFG_RPM_PATH = daisy_tecs_path + self.rpm_name
self.oper_type = {
'install_rpm': self._install_netcfg_rpm,
'uninstall_rpm': self._uninstall_netcfg_rpm,
'update_rpm': self._update_netcfg_rpm,
}
self.oper_shell = {
'CMD_SSHPASS_PRE': "sshpass -p ossdbg1 %(ssh_ip)s %(cmd)s",
'CMD_RPM_UNINSTALL': "rpm -e network-configuration",
'CMD_RPM_INSTALL': "rpm -i /home/%(rpm)s" % {'rpm': self.rpm_name},
'CMD_RPM_UPDATE': "rpm -U /home/%(rpm)s" % {'rpm': self.rpm_name},
'CMD_RPM_SCP': "scp -o StrictHostKeyChecking=no \
%(path)s root@%(ssh_ip)s:/home" %
{'path': self.NETCFG_RPM_PATH, 'ssh_ip': mgnt_ip}
}
LOG.info(_("<<<Network configuration rpm is %s>>>" % self.rpm_name))
self._execute()
def _uninstall_netcfg_rpm(self):
self.clush_cmd = self.oper_shell['CMD_SSHPASS_PRE'] % \
{"ssh_ip": "ssh -o StrictHostKeyChecking=no " + self.mgnt_ip,
"cmd": self.oper_shell['CMD_RPM_UNINSTALL']}
subprocess.check_output(
self.clush_cmd, shell=True, stderr=subprocess.STDOUT)
def _update_netcfg_rpm(self):
self.clush_cmd = self.oper_shell['CMD_SSHPASS_PRE'] % \
{"ssh_ip": "ssh -o StrictHostKeyChecking=no " + self.mgnt_ip,
"cmd": self.oper_shell['CMD_RPM_UPDATE']}
subprocess.check_output(
self.clush_cmd, shell=True, stderr=subprocess.STDOUT)
def _install_netcfg_rpm(self):
if not os.path.exists(self.NETCFG_RPM_PATH):
LOG.error(_("<<<Rpm %s not exist>>>" % self.NETCFG_RPM_PATH))
return
self.clush_cmd = "%s;%s" % \
(self.oper_shell['CMD_SSHPASS_PRE'] %
{"ssh_ip": "", "cmd": self.oper_shell['CMD_RPM_SCP']},
self.oper_shell['CMD_SSHPASS_PRE'] %
{"ssh_ip": "ssh -o StrictHostKeyChecking=no " +
self.mgnt_ip, "cmd": self.oper_shell['CMD_RPM_INSTALL']})
subprocess.check_output(
self.clush_cmd, shell=True, stderr=subprocess.STDOUT)
def _execute(self):
try:
if not self.task_type or not self.mgnt_ip:
LOG.error(
_("<<<TecsShellExector::execute, input params invalid on \
%s!>>>" % self.mgnt_ip, ))
return
self.oper_type[self.task_type]()
except subprocess.CalledProcessError as e:
LOG.warn(_("<<<TecsShellExector::execute:Execute command failed on\
%s! Reason:%s>>>" % (
self.mgnt_ip, e.output.strip())))
except Exception as e:
LOG.exception(_(e.message))
else:
LOG.info(_("<<<TecsShellExector::execute:Execute command:\
%s, succeeded on %s!>>>" % (
self.clush_cmd, self.mgnt_ip)))
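# Illustrative usage (hypothetical management ip):
#   TecsShellExector('10.0.0.5', 'install_rpm')
# scps the network-configuration rpm to the host and installs it;
# 'update_rpm' and 'uninstall_rpm' work likewise.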

View File

@ -1,976 +0,0 @@
# -*- coding: utf-8 -*-
import os
import re
import commands
import types
import subprocess
import socket
import netaddr
from oslo_log import log as logging
from ConfigParser import ConfigParser
from daisy.common import exception
from daisy import i18n
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
service_map = {
'lb': 'haproxy',
'mongodb': 'mongod',
'ha': '',
'mariadb': 'mariadb',
'amqp': 'rabbitmq-server',
'ceilometer-api': 'openstack-ceilometer-api',
'ceilometer-collector': 'openstack-ceilometer-collector,\
openstack-ceilometer-mend',
'ceilometer-central': 'openstack-ceilometer-central',
'ceilometer-notification': 'openstack-ceilometer-notification',
'ceilometer-alarm': 'openstack-ceilometer-alarm-evaluator,\
openstack-ceilometer-alarm-notifier',
'heat-api': 'openstack-heat-api',
'heat-api-cfn': 'openstack-heat-api-cfn',
'heat-engine': 'openstack-heat-engine',
'ironic': 'openstack-ironic-api,openstack-ironic-conductor',
'horizon': 'httpd,opencos-alarmmanager',
'keystone': 'openstack-keystone',
'glance': 'openstack-glance-api,openstack-glance-registry',
'cinder-volume': 'openstack-cinder-volume',
'cinder-scheduler': 'openstack-cinder-scheduler',
'cinder-api': 'openstack-cinder-api',
'neutron-metadata': 'neutron-metadata-agent',
'neutron-lbaas': 'neutron-lbaas-agent',
'neutron-dhcp': 'neutron-dhcp-agent',
'neutron-server': 'neutron-server',
'neutron-l3': 'neutron-l3-agent',
'compute': 'openstack-nova-compute',
'nova-cert': 'openstack-nova-cert',
'nova-sched': 'openstack-nova-scheduler',
'nova-vncproxy': 'openstack-nova-novncproxy,openstack-nova-consoleauth',
'nova-conductor': 'openstack-nova-conductor',
'nova-api': 'openstack-nova-api',
'nova-cells': 'openstack-nova-cells',
'camellia-api': 'camellia-api'
}
def add_service_with_host(services, name, host):
if name not in services:
services[name] = []
services[name].append(host)
def add_service_with_hosts(services, name, hosts):
if name not in services:
services[name] = []
for h in hosts:
services[name].append(h['management']['ip'])
def test_ping(ping_src_nic, ping_desc_ips):
ping_cmd = 'fping'
for ip in set(ping_desc_ips):
ping_cmd = ping_cmd + ' -I ' + ping_src_nic + ' ' + ip
obj = subprocess.Popen(
ping_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdoutput, erroutput) = obj.communicate()
_returncode = obj.returncode
if _returncode == 0 or _returncode == 1:
ping_result = stdoutput.split('\n')
if "No such device" in erroutput:
return []
reachable_hosts = [result.split(
)[0] for result in ping_result if result and
result.split()[2] == 'alive']
else:
msg = "ping failed beaceuse there is invlid ip in %s" % ping_desc_ips
raise exception.InvalidIP(msg)
return reachable_hosts
def get_local_deployment_ip(tecs_deployment_ips):
(status, output) = commands.getstatusoutput('ifconfig')
netcard_pattern = re.compile('\S*: ')
ip_str = '([0-9]{1,3}\.){3}[0-9]{1,3}'
# ip_pattern = re.compile('(inet %s)' % ip_str)
pattern = re.compile(ip_str)
nic_ip = {}
for netcard in re.finditer(netcard_pattern, str(output)):
nic_name = netcard.group().split(': ')[0]
if nic_name == "lo":
continue
ifconfig_nic_cmd = "ifconfig %s" % nic_name
(status, output) = commands.getstatusoutput(ifconfig_nic_cmd)
if status:
continue
ip = pattern.search(str(output))
if ip and ip.group() != "127.0.0.1":
nic_ip[nic_name] = ip.group()
deployment_ip = ''
for nic in nic_ip.keys():
if nic_ip[nic] in tecs_deployment_ips:
deployment_ip = nic_ip[nic]
break
if not deployment_ip:
for nic, ip in nic_ip.items():
if test_ping(nic, tecs_deployment_ips):
deployment_ip = nic_ip[nic]
break
return deployment_ip
class AnalsyConfig(object):
def __init__(self, all_configs):
self.all_configs = all_configs
self.services = {}
self.components = []
self.modes = {}
# self.ha_conf = {}
self.services_in_component = {}
# self.heartbeat = {}
self.lb_components = []
self.heartbeats = [[], [], []]
self.lb_vip = ''
self.ha_vip = ''
self.db_vip = ''
self.glance_vip = ''
self.public_vip = ''
self.share_disk_services = []
self.share_cluster_disk_services = []
self.ha_conf = {}
self.child_cell_dict = {}
self.ha_master_host = {}
def get_heartbeats(self, host_interfaces):
for network in host_interfaces:
self.heartbeats[0].append(network["management"]["ip"])
# if network.has_key("heartbeat1") and network["heartbeat1"]["ip"]:
if "heartbeat1" in network and network["heartbeat1"]["ip"]:
self.heartbeats[1].append(network["heartbeat1"]["ip"])
# if network.has_key("heartbeat2") and network["heartbeat2"]["ip"]:
if "heartbeat2" in network and network["heartbeat2"]["ip"]:
self.heartbeats[2].append(network["heartbeat2"]["ip"])
# if network.has_key("storage") and network["storage"]["ip"]:
if "storage" in network and network["storage"]["ip"]:
# if not network.has_key("heartbeat1"):
if "heartbeat1" not in network:
self.heartbeats[1].append(network["storage"]["ip"])
# if network.has_key("heartbeat1") and not \
# network.has_key("heartbeat2"):
if "heartbeat1" in network and \
"heartbeat2" not in network:
self.heartbeats[2].append(network["storage"]["ip"])
# delete empty heartbeat line
if not self.heartbeats[0]:
self.heartbeats[0] = self.heartbeats[1]
self.heartbeats[1] = self.heartbeats[2]
if not self.heartbeats[1]:
self.heartbeats[1] = self.heartbeats[2]
# remove repeated ip
if set(self.heartbeats[1]) == set(self.heartbeats[0]):
self.heartbeats[1] = []
if set(self.heartbeats[2]) != set(self.heartbeats[0]):
self.heartbeats[1] = self.heartbeats[2]
self.heartbeats[2] = []
if set(self.heartbeats[2]) == set(self.heartbeats[0]) or \
set(self.heartbeats[2]) == set(self.heartbeats[1]):
self.heartbeats[2] = []
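# Illustrative outcome (hypothetical ips): if heartbeat1 carries the
# same ips as the management network and there is no heartbeat2 or
# storage ip, the de-duplication above leaves only heartbeats[0]
# populated and heartbeat_link2/3 end up empty.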
def prepare_child_cell(self, child_cell_name, configs):
cell_compute_hosts = str()
cell_compute_name = child_cell_name[11:] + '_COMPUTER'
for role_name, role_configs in self.all_configs.items():
if role_name == cell_compute_name:
cell_compute_host = [
host_interface['management']['ip']
for host_interface in role_configs['host_interfaces']]
cell_compute_hosts = ",".join(cell_compute_host)
self.all_configs.pop(role_name)
child_cell_host = configs['host_interfaces'][0]['management']['ip']
self.child_cell_dict[repr(child_cell_host).strip("u'")] \
= repr(cell_compute_hosts).strip("u'")
def prepare_ha_lb(self, role_configs, is_ha, is_lb):
if is_lb:
self.ha_master_host['ip'] = role_configs[
'host_interfaces'][0]['management']['ip']
self.ha_master_host['hostname'] = role_configs[
'host_interfaces'][0]['name']
self.components.append('CONFIG_LB_INSTALL')
add_service_with_hosts(self.services,
'CONFIG_LB_BACKEND_HOSTS',
role_configs['host_interfaces'])
self.lb_vip = role_configs['vip']
if is_ha:
# convert dns to ip
manage_ips = []
for host_interface in role_configs['host_interfaces']:
manage_ip = ''
management_addr =\
host_interface['management']['ip']
try:
ip_lists = socket.gethostbyname_ex(management_addr)
manage_ip = ip_lists[2][0]
except Exception:
if netaddr.IPAddress(management_addr).version == 6:
manage_ip = management_addr
else:
raise exception.InvalidNetworkConfig(
"manage ip is not valid %s" % management_addr)
finally:
manage_ips.append(manage_ip)
self.ha_vip = role_configs['vip']
self.share_disk_services += role_configs['share_disk_services']
self.share_cluster_disk_services += \
role_configs['share_cluster_disk_services']
local_deployment_ip = get_local_deployment_ip(manage_ips)
filename = r'/etc/zte-docker'
if local_deployment_ip:
if os.path.exists(filename):
add_service_with_host(
self.services, 'CONFIG_REPO',
'http://' + local_deployment_ip +
':18080' + '/tecs_install/')
else:
add_service_with_host(
self.services, 'CONFIG_REPO',
'http://' + local_deployment_ip + '/tecs_install/')
else:
msg = "can't find ip for yum repo"
raise exception.InvalidNetworkConfig(msg)
self.components.append('CONFIG_HA_INSTALL')
add_service_with_host(
self.services, 'CONFIG_HA_HOST',
role_configs['host_interfaces'][0]['management']['ip'])
add_service_with_hosts(self.services, 'CONFIG_HA_HOSTS',
role_configs['host_interfaces'])
ntp_host = role_configs['ntp_server'] \
if role_configs['ntp_server'] else role_configs['vip']
add_service_with_host(self.services, 'CONFIG_NTP_SERVERS',
ntp_host)
if role_configs['db_vip']:
self.db_vip = role_configs['db_vip']
add_service_with_host(
self.services, 'CONFIG_MARIADB_HOST',
role_configs['db_vip'])
else:
self.db_vip = role_configs['vip']
add_service_with_host(
self.services, 'CONFIG_MARIADB_HOST', role_configs['vip'])
if role_configs['glance_vip']:
self.glance_vip = role_configs['glance_vip']
add_service_with_host(
self.services, 'CONFIG_GLANCE_HOST',
role_configs['glance_vip'])
else:
self.glance_vip = role_configs['vip']
add_service_with_host(
self.services, 'CONFIG_GLANCE_HOST', role_configs['vip'])
if role_configs['public_vip']:
self.public_vip = role_configs['public_vip']
else:
self.public_vip = role_configs['vip']
add_service_with_host(self.services,
'CONFIG_NOVA_VNCPROXY_HOST',
self.public_vip)
add_service_with_host(self.services, 'CONFIG_PUBLIC_IP',
self.public_vip)
add_service_with_host(self.services, 'CONFIG_HORIZON_HOST',
self.public_vip)
'''
add_service_with_host(self.services, 'CONFIG_ADMIN_IP',
role_configs['vip'])
add_service_with_host(self.services, 'CONFIG_INTERNAL_IP',
role_configs['vip'])
'''
def prepare_role_service(self, is_ha, service, role_configs):
host_key_name = "CONFIG_%s_HOST" % service
hosts_key_name = "CONFIG_%s_HOSTS" % service
add_service_with_hosts(self.services, hosts_key_name,
role_configs['host_interfaces'])
if service != 'LB' and service not in ['NOVA_VNCPROXY', 'MARIADB',
'GLANCE', 'HORIZON']:
add_service_with_host(self.services, host_key_name,
role_configs['vip'])
if is_ha and service == 'LB':
add_service_with_hosts(
self.services, 'CONFIG_LB_FRONTEND_HOSTS',
role_configs['host_interfaces'])
def prepare_mode(self, is_ha, is_lb, service):
mode_key = "CONFIG_%s_INSTALL_MODE" % service
if is_ha:
self.modes.update({mode_key: 'HA'})
elif is_lb:
self.modes.update({mode_key: 'LB'})
# special process
if service == 'GLANCE':
self.modes.update(
{'CONFIG_GLANCE_API_INSTALL_MODE': 'LB'})
self.modes.update(
{'CONFIG_GLANCE_REGISTRY_INSTALL_MODE': 'LB'})
# if s == 'HEAT':
# self.modes.update({'CONFIG_HEAT_API_INSTALL_MODE': 'LB'})
# self.modes.update({'CONFIG_HEAT_API_CFN_INSTALL_MODE': 'LB'})
# if s == 'CEILOMETER':
# self.modes.update({
# 'CONFIG_CEILOMETER_API_INSTALL_MODE': 'LB'})
if service == 'IRONIC':
self.modes.update(
{'CONFIG_IRONIC_API_INSTALL_MODE': 'LB'})
else:
self.modes.update({mode_key: 'None'})
def prepare_services_in_component(self, component, service, role_configs):
if component not in self.services_in_component.keys():
self.services_in_component[component] = {}
self.services_in_component[component]["service"] = []
self.services_in_component[component][
"service"].append(service_map[service])
if component == "horizon":
self.services_in_component[component]["fip"] = self.public_vip
elif component == "database":
self.services_in_component[component]["fip"] = self.db_vip
elif component == "glance":
self.services_in_component[component]["fip"] = self.glance_vip
else:
self.services_in_component[component]["fip"] = role_configs["vip"]
network_name = ''
if component in ['horizon'] and\
'publicapi' in role_configs["host_interfaces"][0]:
network_name = 'publicapi'
else:
network_name = 'management'
self.services_in_component[component]["netmask"] = \
role_configs["host_interfaces"][0][network_name]["netmask"]
self.services_in_component[component]["nic_name"] = \
role_configs["host_interfaces"][0][network_name]["name"]
if component == 'loadbalance' and \
'CONTROLLER_LB' in self.all_configs and \
self.all_configs['CONTROLLER_LB']['vip']:
self.services_in_component[component]["fip"] = \
self.all_configs['CONTROLLER_LB']['vip']
def prepare_amqp_mariadb(self):
if self.lb_vip:
amqp_vip = ''
if self.modes['CONFIG_AMQP_INSTALL_MODE'] == 'LB':
amqp_vip = self.lb_vip
add_service_with_host(
self.services,
'CONFIG_AMQP_CLUSTER_MASTER_NODE_IP',
self.ha_master_host['ip'])
add_service_with_host(
self.services, 'CONFIG_AMQP_CLUSTER_MASTER_NODE_HOSTNAME',
self.ha_master_host['hostname'])
else:
amqp_vip = self.ha_vip
amqp_dict = "{'%s':'%s,%s,%s,%s'}" % (amqp_vip, self.ha_vip,
self.lb_vip, self.glance_vip,
self.public_vip)
mariadb_dict = "{'%s':'%s,%s,%s,%s'}" % (self.db_vip, self.ha_vip,
self.lb_vip,
self.glance_vip,
self.public_vip)
add_service_with_host(self.services, 'CONFIG_LB_HOST', self.lb_vip)
elif self.ha_vip:
amqp_dict = "{'%s':'%s,%s,%s'}" % (self.ha_vip, self.ha_vip,
self.glance_vip,
self.public_vip)
mariadb_dict = "{'%s':'%s,%s,%s'}" % (self.db_vip, self.ha_vip,
self.glance_vip,
self.public_vip)
else:
amqp_dict = "{}"
mariadb_dict = "{}"
if self.lb_vip or self.ha_vip:
add_service_with_host(self.services, 'CONFIG_MARIADB_DICT',
mariadb_dict)
add_service_with_host(self.services, 'CONFIG_AMQP_DICT', amqp_dict)
def prepare(self):
for role_name, role_configs in self.all_configs.items():
if role_name == "OTHER":
continue
is_ha = re.match(".*_HA$", role_name) is not None
is_lb = re.match(".*_LB$", role_name) is not None
is_child_cell = re.match(".*_CHILD_CELL.*", role_name) is not None
if is_child_cell:
self.prepare_child_cell(role_name, role_configs)
continue
self.prepare_ha_lb(role_configs, is_ha, is_lb)
for service, component in role_configs['services'].items():
s = service.strip().upper().replace('-', '_')
self.prepare_role_service(is_ha, s, role_configs)
self.prepare_mode(is_ha, is_lb, s)
if is_lb:
self.lb_components.append(component)
c = "CONFIG_%s_INSTALL" % \
component.strip().upper().replace('-', '_')
self.components.append(c)
if is_ha:
if component == 'log':
continue
self.prepare_services_in_component(component, service,
role_configs)
if is_ha:
self.get_heartbeats(role_configs['host_interfaces'])
self.prepare_amqp_mariadb()
if self.child_cell_dict:
add_service_with_host(self.services, 'CONFIG_CHILD_CELL_DICT',
str(self.child_cell_dict))
def update_conf_with_services(self, tecs):
for s in self.services:
if tecs.has_option("general", s):
# if type(self.services[s]) is types.ListType:
if isinstance(self.services[s], types.ListType):
if self.services[s] and not self.services[s][0]:
return
tecs.set("general", s, ','.join(self.services[s]))
else:
msg = "service %s is not exit in conf file" % s
LOG.info(msg)
def update_conf_with_components(self, tecs):
for s in self.components:
if tecs.has_option("general", s):
tecs.set("general", s, 'y')
else:
msg = "component %s is not exit in conf file" % s
LOG.info(msg)
def update_conf_with_modes(self, tecs):
for k, v in self.modes.items():
if tecs.has_option("general", k):
tecs.set("general", k, v)
else:
msg = "mode %s is not exit in conf file" % k
LOG.info(msg)
def update_tecs_conf(self, tecs):
self.update_conf_with_services(tecs)
self.update_conf_with_components(tecs)
self.update_conf_with_modes(tecs)
def update_ha_conf(self, ha, ha_nic_name, tecs=None):
if self.all_configs['OTHER'].get('dns_config'):
for heartbeat in self.heartbeats:
for name_ip in self.all_configs['OTHER']['dns_config']:
for tmp in heartbeat:
if tmp == name_ip.keys()[0]:
heartbeat.remove(tmp)
heartbeat.append(name_ip.values()[0])
for k, v in self.services_in_component.items():
for name_ip in self.all_configs['OTHER']['dns_config']:
if v['fip'] == name_ip.keys()[0]:
v['fip'] = name_ip.values()[0]
ha.set('DEFAULT', 'heartbeat_link1', ','.join(self.heartbeats[0]))
ha.set('DEFAULT', 'heartbeat_link2', ','.join(self.heartbeats[1]))
ha.set('DEFAULT', 'heartbeat_link3', ','.join(self.heartbeats[2]))
ha.set('DEFAULT', 'components', ','.join(
self.services_in_component.keys()))
for k, v in self.services_in_component.items():
ha.set('DEFAULT', k, ','.join(v['service']))
if k == 'glance':
if 'glance' in self.share_disk_services:
ha.set('DEFAULT', 'glance_device_type', 'iscsi')
ha.set(
'DEFAULT', 'glance_device',
'/dev/mapper/vg_glance-lv_glance')
ha.set('DEFAULT', 'glance_fs_type', 'ext4')
else:
ha.set('DEFAULT', 'glance_device_type', 'drbd')
ha.set(
'DEFAULT', 'glance_device', '/dev/vg_data/lv_glance')
ha.set('DEFAULT', 'glance_fs_type', 'ext4')
# mariadb does not support db cluster for now, so share disk is not supported.
if k == "database":
if 'db' in self.share_disk_services:
ha.set(
'DEFAULT', 'database_device',
'/dev/mapper/vg_db-lv_db')
ha.set('DEFAULT', 'database_fs_type', 'ext4')
ha.set('DEFAULT', 'database_device_type', 'share')
if tecs:
tecs.set(
"general",
'CONFIG_HA_INSTALL_MARIADB_LOCAL',
'n')
elif 'db' in self.share_cluster_disk_services:
ha.set(
'DEFAULT', 'database_device',
'/dev/mapper/vg_db-lv_db')
ha.set('DEFAULT', 'database_fs_type', 'ext4')
ha.set('DEFAULT', 'database_device_type', 'share_cluster')
if tecs:
tecs.set(
"general",
'CONFIG_HA_INSTALL_MARIADB_LOCAL',
'y')
else:
ha.set('DEFAULT', 'database_device_type', 'local_cluster')
if tecs:
tecs.set(
"general",
'CONFIG_HA_INSTALL_MARIADB_LOCAL',
'y')
if 'db_backup' in self.share_disk_services:
ha.set(
'DEFAULT',
'backup_database_device',
'/dev/mapper/vg_db_backup-lv_db_backup')
ha.set('DEFAULT', 'backup_database_fs_type', 'ext4')
if "mongod" in v['service']:
if 'mongodb' in self.share_disk_services:
ha.set(
'DEFAULT', 'mongod_device',
'/dev/mapper/vg_mongodb-lv_mongodb')
ha.set('DEFAULT', 'mongod_fs_type', 'ext4')
ha.set('DEFAULT', 'mongod_local', '')
if tecs:
tecs.set(
"general",
'CONFIG_HA_INSTALL_MONGODB_LOCAL', 'n')
else:
ha.set('DEFAULT', 'mongod_fs_type', 'ext4')
ha.set('DEFAULT', 'mongod_local', 'yes')
if tecs:
tecs.set(
"general",
'CONFIG_HA_INSTALL_MONGODB_LOCAL', 'y')
if k not in self.lb_components:
# if "bond" in v['nic_name']:
# v['nic_name'] = "vport"
ha.set('DEFAULT', k + '_fip', v['fip'])
if ha_nic_name and k not in ['horizon']:
nic_name = ha_nic_name
else:
nic_name = v['nic_name']
ha.set('DEFAULT', k + '_nic', nic_name)
cidr_netmask = reduce(lambda x, y: x + y,
[bin(int(i)).count('1')
for i in v['netmask'].split('.')])
ha.set('DEFAULT', k + '_netmask', cidr_netmask)
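# The reduce above turns a dotted netmask into a prefix length by
# counting set bits per octet, e.g. '255.255.254.0' -> 8 + 8 + 7 + 0 = 23.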
def update_conf(tecs, key, value):
tecs.set("general", key, value)
def get_conf(tecs_conf_file, **kwargs):
result = {}
if not kwargs:
return result
tecs = ConfigParser()
tecs.optionxform = str
tecs.read(tecs_conf_file)
result = {key: tecs.get("general", kwargs.get(key, None))
for key in kwargs.keys()
if tecs.has_option("general", kwargs.get(key, None))}
return result
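# Illustrative usage (hypothetical file and option):
#   get_conf('/etc/tecs.conf', lb_vip='CONFIG_LB_HOST')
# returns {'lb_vip': <value of CONFIG_LB_HOST>} when that option exists
# in the [general] section.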
def _get_physnics_info(network_type, phynics):
# bond1(active-backup;lacp;eth1-eth2)
# eth0
# phynet1:eth0
# phynet1:bond1(active-backup;lacp;eth1-eth2), phynet2:eth3
phynics_info = []
if not phynics:
return
phynic_info = phynics.split("(")
if 2 == len(phynic_info):
phynic_info = phynic_info[1][0:-1].split(";")
phynics_info.extend(phynic_info[-1].split('-'))
else:
phynic_info = phynic_info[0].split(":")
if network_type == 'vlan':
phynics_info.append(phynic_info[1])
else:
phynics_info.append(phynic_info[0])
return phynics_info
def get_physnics_info(network_type, phynics):
# bond1(active-backup;lacp;eth1-eth2)
# phynet1:eth0
# phynet1:bond1(active-backup;lacp;eth1-eth2), phynet1:eth3
phynics_info = []
if network_type == 'vxlan':
phynics_info.extend(_get_physnics_info(network_type, phynics))
elif network_type == 'vlan':
phynics = phynics.split(',')
for phynic_info in phynics:
phynics_info.extend(_get_physnics_info(network_type, phynic_info))
return phynics_info
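# Illustrative examples (hypothetical names):
#   get_physnics_info('vlan', 'phynet1:eth0') -> ['eth0']
#   get_physnics_info('vxlan', 'bond1(active-backup;lacp;eth1-eth2)')
#       -> ['eth1', 'eth2']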
def update_conf_with_zenic(tecs, zenic_configs):
zenic_vip = zenic_configs.get('vip')
if not zenic_vip:
return
auth = zenic_configs.get('auth')
if not auth:
auth = 'restconf:LkfhRDGIPyGzbWGM2uAaNQ=='
update_conf(tecs, 'CONFIG_ZENIC_USER_AND_PW', auth)
update_conf(tecs, 'CONFIG_ZENIC_API_NODE', '%s:8181' % zenic_vip)
ml2_drivers = tecs.get(
"general", 'CONFIG_NEUTRON_ML2_MECHANISM_DRIVERS').split(',')
ml2_drivers.extend(['proxydriver'])
update_conf(
tecs, 'CONFIG_NEUTRON_ML2_MECHANISM_DRIVERS', ','.join(ml2_drivers))
class DvsDaisyConfig(object):
def __init__(self, tecs, networks_config):
self.tecs = tecs
self.networks_config = networks_config
# common
self.dvs_network_type = []
self.dvs_vswitch_type = {}
self.dvs_cpu_sets = []
self.dvs_physnics = []
self.enable_sdn = False
# for vlan
self.dvs_physical_mappings = []
self.dvs_bridge_mappings = []
# for vxlan
self.dvs_vtep_ip_ranges = []
self.dvs_vxlan_info = ''
self.dvs_domain_id = {}
def config_tecs_for_dvs(self):
self._get_dvs_config()
self._set_dvs_config()
def _get_dvs_config(self):
network = self.networks_config
vswitch_type = network.get('vswitch_type')
if not vswitch_type:
return
self.dvs_vswitch_type.update(vswitch_type)
dvs_cpu_sets = network.get('dvs_cpu_sets')
self.dvs_cpu_sets.extend(dvs_cpu_sets)
network_type = network['network_config'].get('network_type')
if network_type in ['vlan']:
self.dvs_network_type.extend(['vlan'])
self._private_network_conf_for_dvs(network)
elif network_type in ['vxlan']:
self.dvs_network_type.extend(['vxlan'])
self._bearing_network_conf_for_dvs(network)
def _set_dvs_config(self):
if not self.networks_config.get('enable_sdn') and (
self.dvs_vswitch_type.get('ovs_agent_patch')) and (
len(self.dvs_vswitch_type.get('ovs_agent_patch')) > 0):
return
if not self.dvs_vswitch_type.get('ovs_agent_patch') and not\
self.dvs_vswitch_type.get('ovdk'):
return
update_conf(self.tecs, 'CONFIG_DVS_TYPE', self.dvs_vswitch_type)
update_conf(self.tecs, 'CONFIG_DVS_PHYSICAL_NICS',
",".join(set(self.dvs_physnics)))
# cpu sets for dvs, add CONFIG_DVS_CPU_SETS to tecs.conf firstly
update_conf(self.tecs, 'CONFIG_DVS_CPU_SETS', self.dvs_cpu_sets)
if 'vlan' in self.dvs_network_type:
update_conf(self.tecs, 'CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS',
self.dvs_bridge_mappings)
update_conf(self.tecs, 'CONFIG_NEUTRON_OVS_PHYSNET_IFACES',
self.dvs_physical_mappings)
elif 'vxlan' in self.dvs_network_type:
update_conf(self.tecs, 'CONFIG_DVS_VXLAN_INFO',
self.dvs_vxlan_info)
update_conf(self.tecs, 'CONFIG_DVS_NODE_DOMAIN_ID',
self.dvs_domain_id)
update_conf(self.tecs, 'CONFIG_NEUTRON_ML2_VTEP_IP_RANGES',
self.dvs_vtep_ip_ranges)
'''
private_networks_config_for_dvs
{
network_config = {
enable_sdn = ''
network_type = ['vlan']
}
vswitch_type = { ===============> such as vxlan
'ovdk': ['192.168.0.2', '192.168.0.20'] ,
'ovs_agent_patch': ['192.168.0.21', '192.168.0.30']
}
physnics_config = {
physical_mappings = eth0 ===============> such as ovs vlan
bridge_mappings = ==========> private->name & physical_name
}
}
'''
def _private_network_conf_for_dvs(self, private_network):
self.dvs_vswitch_type.update(private_network.get('vswitch_type'))
self.dvs_bridge_mappings = \
private_network['physnics_config'].get('bridge_mappings')
self.dvs_physical_mappings = \
private_network['physnics_config'].get('physical_mappings')
self.dvs_physical_mappings = self.dvs_physical_mappings.encode('utf8')
self.dvs_physnics.extend(
get_physnics_info('vlan', self.dvs_physical_mappings))
'''
bearing_networks_config
{
network_config = {
enable_sdn = ''
network_type = ['vxlan']
vtep_ip_ranges=[['192.168.0.2','192.168.0.200']]==>bearing->ip_range
}
vswitch_type = { ==========> bearing->assigned_network
'ovdk': ['192.168.0.2', '192.168.0.20'] ,
'ovs_agent_patch': ['192.168.0.21', '192.168.0.30']
}
physnics_config = {
vxlan_info = eth0 ======>bearing->assigned_network->host_interface
domain_id = { ==========> bearing->assigned_network
'0': ['192.168.0.2', '192.168.0.20'] ,
'1': ['192.168.0.21', '192.168.0.30']
}
}
}
'''
def _bearing_network_conf_for_dvs(self, bearing_network):
self.dvs_vtep_ip_ranges.extend(
bearing_network['network_config'].get('vtep_ip_ranges'))
self.dvs_vswitch_type.update(bearing_network.get('vswitch_type'))
self.dvs_domain_id.update(
bearing_network['physnics_config'].get('dvs_domain_id'))
self.dvs_vxlan_info = \
bearing_network['physnics_config'].get('vxlan_info')
self.dvs_physnics.extend(
get_physnics_info('vxlan', self.dvs_vxlan_info))
default_tecs_conf_template_path = "/var/lib/daisy/tecs/"
tecs_conf_template_path = default_tecs_conf_template_path
def private_network_conf(tecs, private_networks_config):
if private_networks_config:
mode_str = {
'0': '(active-backup;off;"%s-%s")',
'1': '(balance-slb;off;"%s-%s")',
'2': '(balance-tcp;active;"%s-%s")'
}
config_neutron_sriov_bridge_mappings = []
config_neutron_sriov_physnet_ifaces = []
config_neutron_ovs_bridge_mappings = []
config_neutron_ovs_physnet_ifaces = []
for private_network in private_networks_config:
type = private_network.get('type', None)
name = private_network.get('name', None)
assign_networks = private_network.get('assigned_networks', None)
slave1 = private_network.get('slave1', None)
slave2 = private_network.get('slave2', None)
mode = private_network.get('mode', None)
if not type or not name or not assign_networks or not\
slave1 or not slave2 or not mode:
break
for assign_network in assign_networks:
network_type = assign_network.get('network_type', None)
# TODO: why are ml2_type & physnet_name null?
ml2_type = assign_network.get('ml2_type', None)
physnet_name = assign_network.get('physnet_name', None)
if not network_type or not ml2_type or not physnet_name:
break
# ether
if 0 == cmp(type, 'ether') and\
0 == cmp(network_type, 'DATAPLANE'):
if 0 == cmp(ml2_type, 'sriov'):
config_neutron_sriov_bridge_mappings.append(
"%s:%s" % (physnet_name, "br-" + name))
config_neutron_sriov_physnet_ifaces.append(
"%s:%s" % (physnet_name, name))
elif 0 == cmp(ml2_type, 'ovs'):
config_neutron_ovs_bridge_mappings.append(
"%s:%s" % (physnet_name, "br-" + name))
config_neutron_ovs_physnet_ifaces.append(
"%s:%s" % (physnet_name, name))
# bond
elif 0 == cmp(type, 'bond') and\
0 == cmp(network_type, 'DATAPLANE'):
if 0 == cmp(ml2_type, 'sriov'):
config_neutron_sriov_bridge_mappings.append(
"%s:%s" % (physnet_name, "br-" + name))
config_neutron_sriov_physnet_ifaces.append(
"%s:%s" % (physnet_name, name + mode_str[mode]
% (slave1, slave2)))
elif 0 == cmp(ml2_type, 'ovs'):
config_neutron_ovs_bridge_mappings.append(
"%s:%s" % (physnet_name, "br-" + name))
config_neutron_ovs_physnet_ifaces.append(
"%s:%s" % (physnet_name, name + mode_str[mode]
% (slave1, slave2)))
if config_neutron_sriov_bridge_mappings:
update_conf(tecs,
'CONFIG_NEUTRON_SRIOV_BRIDGE_MAPPINGS',
",".join(config_neutron_sriov_bridge_mappings))
if config_neutron_sriov_physnet_ifaces:
update_conf(tecs,
'CONFIG_NEUTRON_SRIOV_PHYSNET_IFACES',
",".join(config_neutron_sriov_physnet_ifaces))
if config_neutron_ovs_bridge_mappings:
update_conf(tecs, 'CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS',
",".join(config_neutron_ovs_bridge_mappings))
if config_neutron_ovs_physnet_ifaces:
update_conf(tecs, 'CONFIG_NEUTRON_OVS_PHYSNET_IFACES',
",".join(config_neutron_ovs_physnet_ifaces))
def update_tecs_config(config_data, cluster_conf_path):
msg = "tecs config data is: %s" % config_data
LOG.info(msg)
daisy_tecs_path = tecs_conf_template_path
tecs_conf_template_file = os.path.join(daisy_tecs_path, "tecs.conf")
ha_conf_template_file = os.path.join(daisy_tecs_path, "HA.conf")
if not os.path.exists(cluster_conf_path):
os.makedirs(cluster_conf_path)
tecs_conf_out = os.path.join(cluster_conf_path, "tecs.conf")
ha_config_out = os.path.join(cluster_conf_path, "HA_1.conf")
tecs = ConfigParser()
tecs.optionxform = str
tecs.read(tecs_conf_template_file)
cluster_data = config_data['OTHER']['cluster_data']
update_conf(tecs, 'CLUSTER_ID', cluster_data['id'])
# if cluster_data.has_key('networking_parameters'):
if 'networking_parameters' in cluster_data:
networking_parameters = cluster_data['networking_parameters']
# if networking_parameters.has_key('base_mac') and\
if 'base_mac' in networking_parameters and\
networking_parameters['base_mac']:
update_conf(
tecs, 'CONFIG_NEUTRON_BASE_MAC',
networking_parameters['base_mac'])
# if networking_parameters.has_key('gre_id_range') and\
if 'gre_id_range' in networking_parameters and\
len(networking_parameters['gre_id_range']) > 1 \
and networking_parameters['gre_id_range'][0] and\
networking_parameters['gre_id_range'][1]:
update_conf(tecs, 'CONFIG_NEUTRON_ML2_TUNNEL_ID_RANGES',
("%s:%s" % (networking_parameters['gre_id_range'][0],
networking_parameters['gre_id_range'][1])))
if 'vxlan' in config_data['OTHER'].get('segmentation_type', {}):
update_conf(
tecs, 'CONFIG_NEUTRON_ML2_VNI_RANGES',
config_data['OTHER']['segmentation_type']['vxlan']['vni_range'])
update_conf(tecs, 'CONFIG_NEUTRON_ML2_TENANT_NETWORK_TYPES', 'vxlan')
update_conf(tecs, 'CONFIG_NEUTRON_ML2_TYPE_DRIVERS', 'vxlan')
else:
update_conf(tecs, 'CONFIG_NEUTRON_ML2_TENANT_NETWORK_TYPES', 'vlan')
update_conf(tecs, 'CONFIG_NEUTRON_ML2_TYPE_DRIVERS', 'vlan')
physic_network_cfg = config_data['OTHER']['physic_network_config']
if physic_network_cfg.get('json_path', None):
update_conf(
tecs, 'CONFIG_NEUTRON_ML2_JSON_PATH',
physic_network_cfg['json_path'])
if physic_network_cfg.get('vlan_ranges', None):
update_conf(tecs, 'CONFIG_NEUTRON_ML2_VLAN_RANGES',
physic_network_cfg['vlan_ranges'])
if config_data['OTHER']['tecs_installed_hosts']:
update_conf(tecs, 'EXCLUDE_SERVERS', ",".join(
config_data['OTHER']['tecs_installed_hosts']))
ha = ConfigParser()
ha.optionxform = str
ha.read(ha_conf_template_file)
config = AnalsyConfig(config_data)
# if config_data['OTHER'].has_key('ha_nic_name'):
if 'ha_nic_name' in config_data['OTHER']:
ha_nic_name = config_data['OTHER']['ha_nic_name']
else:
ha_nic_name = ""
config.prepare()
config.update_tecs_conf(tecs)
config.update_ha_conf(ha, ha_nic_name, tecs)
update_conf_with_zenic(tecs, config_data['OTHER']['zenic_config'])
# if config_data['OTHER']['dvs_config'].has_key('network_config'):
if 'network_config' in config_data['OTHER']['dvs_config']:
config_data['OTHER']['dvs_config']['network_config']['enable_sdn'] = \
config_data['OTHER']['zenic_config'].get('vip', False)
dvs_config = DvsDaisyConfig(tecs, config_data['OTHER']['dvs_config'])
dvs_config.config_tecs_for_dvs()
tecs.write(open(tecs_conf_out, "w+"))
ha.write(open(ha_config_out, "w+"))
return
def test():
print("Hello, world!")

View File

@ -1,315 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/install endpoint for tecs API
"""
import subprocess
from oslo_log import log as logging
from daisy import i18n
from daisy.common import exception
import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.tecs.common as tecs_cmn
try:
import simplejson as json
except ImportError:
import json
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
def _get_service_disk_for_disk_array(req, role_id):
disk_info = []
service_disks = tecs_cmn.get_service_disk_list(req,
{'filters': {
'role_id': role_id}})
for service_disk in service_disks:
share_disk = {}
if service_disk['disk_location'] == 'share':
share_disk['service'] = service_disk['service']
share_disk['protocol_type'] = service_disk['protocol_type']
share_disk['lun'] = service_disk['lun']
if service_disk['protocol_type'] == 'FIBER':
share_disk['fc_hba_wwpn'] = \
service_disk['data_ips'].split(',')
else:
share_disk['data_ips'] = service_disk['data_ips'].split(',')
share_disk['lvm_config'] = {}
share_disk['lvm_config']['size'] = service_disk['size']
share_disk['lvm_config']['vg_name'] =\
'vg_%s' % service_disk['service']
share_disk['lvm_config']['lv_name'] =\
'lv_%s' % service_disk['service']
share_disk['lvm_config']['fs_type'] = 'ext4'
disk_info.append(share_disk)
return disk_info
def _get_share_cluster_disk_for_disk_array(req, role_id):
'''
disk_info = [{'service': 'db', 'lun': 'lun1', 'data_ips':'data_ip1'},
{'service': 'db', 'lun': 'lun2', 'data_ips':'data_ip2'},
{'service': 'glance', 'lun': 'lun3', 'data_ips':'data_ip3'},
{'service': 'glance', 'lun': 'lun4', 'data_ips':'data_ip4'},]
'''
disk_info = []
service_disks = \
tecs_cmn.get_service_disk_list(req, {'filters': {'role_id': role_id}})
service_name = 'db'
for service_disk in service_disks:
share_cluster_disk = {}
if service_disk['disk_location'] == 'share_cluster':
share_cluster_disk['service'] = service_disk['service']
share_cluster_disk['protocol_type'] = service_disk['protocol_type']
share_cluster_disk['lun'] = service_disk['lun']
if service_disk['protocol_type'] == 'FIBER':
share_cluster_disk['fc_hba_wwpn'] = \
service_disk['data_ips'].split(',')
else:
share_cluster_disk['data_ips'] = \
service_disk['data_ips'].split(',')
share_cluster_disk['lvm_config'] = {}
share_cluster_disk['lvm_config']['size'] = service_disk['size']
share_cluster_disk['lvm_config']['vg_name'] =\
'vg_%s' % service_disk['service']
share_cluster_disk['lvm_config']['lv_name'] =\
'lv_%s' % service_disk['service']
share_cluster_disk['lvm_config']['fs_type'] = 'ext4'
disk_info.append(share_cluster_disk)
return disk_info
def _get_cinder_volume_for_disk_array(req, role_id):
cinder_volume_info = []
cinder_volumes = tecs_cmn.get_cinder_volume_list(req,
{'filters': {
'role_id': role_id}})
for cinder_volume in cinder_volumes:
cv_info = {}
cv_info['management_ips'] =\
cinder_volume['management_ips'].split(',')
cv_info['data_ips'] = cinder_volume['data_ips'].split(',')
cv_info['user_name'] = cinder_volume['user_name']
cv_info['user_pwd'] = cinder_volume['user_pwd']
index = cinder_volume['backend_index']
cv_info['backend'] = {index: {}}
cv_info['backend'][index]['volume_driver'] =\
cinder_volume['volume_driver']
cv_info['backend'][index]['volume_type'] =\
cinder_volume['volume_type']
cv_info['backend'][index]['pools'] =\
cinder_volume['pools'].split(',')
cinder_volume_info.append(cv_info)
return cinder_volume_info
def get_disk_array_info(req, cluster_id):
share_disk_info = []
share_cluster_disk_info = []
volume_disk_info = {}
cinder_volume_disk_list = []
roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id)
for role in roles:
if role['deployment_backend'] != daisy_cmn.tecs_backend_name:
continue
if role['name'] == 'CONTROLLER_HA':
share_disks = _get_service_disk_for_disk_array(req, role['id'])
share_cluster_disks = \
_get_share_cluster_disk_for_disk_array(req, role['id'])
share_disk_info += share_disks
share_cluster_disk_info += share_cluster_disks
cinder_volumes =\
_get_cinder_volume_for_disk_array(req, role['id'])
cinder_volume_disk_list += cinder_volumes
if cinder_volume_disk_list:
volume_disk_info['disk_array'] = cinder_volume_disk_list
return (share_disk_info, volume_disk_info, share_cluster_disk_info)
def config_ha_share_disk(share_disk_info,
controller_ha_nodes,
share_cluster_disk_info=None):
'''
share_disk_info = \
[{'service': 'db', 'lun': 'lun1', 'data_ips':'data_ip1'},
{'service': 'glance', 'lun': 'lun3', 'data_ips':'data_ip3'},]
share_cluster_disk_info = \
[{'service': 'db', 'lun': 'lun1', 'data_ips':'data_ip1', ...},
{'service': 'db', 'lun': 'lun2', 'data_ips':'data_ip2', ...},
{'service': 'glance', 'lun': 'lun3', 'data_ips':'data_ip3'},
{'service': 'glance', 'lun': 'lun4', 'data_ips':'data_ip4'},]
controller_ha_nodes[host_ip] = min_mac
'''
sorted_db_share_cluster = []
if share_cluster_disk_info:
db_share_cluster_disk = \
[disk for disk in share_cluster_disk_info
if disk['service'] == 'db']
if len(db_share_cluster_disk) != 2:
error_msg = 'share cluster disk: %s must exist in pairs.' % \
db_share_cluster_disk
LOG.error(error_msg)
raise exception.InstallException(error_msg)
sorted_db_share_cluster = \
sorted(db_share_cluster_disk, key=lambda s: s['lun'])
sorted_ha_nodes = \
sorted(controller_ha_nodes.iteritems(), key=lambda d: d[1])
sorted_ha_nodes_ip = [node[0] for node in sorted_ha_nodes]
all_share_disk_info = []
if sorted_db_share_cluster:
all_share_disk_info = \
[[disk] + share_disk_info for disk in sorted_db_share_cluster]
# all_share_disk_info = \
# [[{'lun': 'lun1', 'service': 'db', 'data_ips': 'data_ip1'},
# {'lun': 'lun3', 'service': 'glance', 'data_ips': 'data_ip3'}],
# [{'lun': 'lun2', 'service': 'db', 'data_ips': 'data_ip2'},
# {'lun': 'lun3', 'service': 'glance', 'data_ips': 'data_ip3'}]]
else:
for index in range(len(sorted_ha_nodes)):
all_share_disk_info.append(share_disk_info)
# all_share_disk_info = \
# [{'lun': 'lun3', 'service': 'glance', 'data_ips': 'data_ip3'},
# {'lun': 'lun3', 'service': 'glance', 'data_ips': 'data_ip3'}]
'''
cmd = 'rm -rf /var/lib/daisy/tecs/storage_auto_config/base/*.json'
daisy_cmn.subprocess_call(cmd)
with open("/var/lib/daisy/tecs/storage_auto_config/base/control.json",\
"w") as fp:
json.dump(share_disk_info, fp, indent=2)
for host_ip in controller_ha_nodes.keys():
try:
scp_bin_result = subprocess.check_output(
'scp -o StrictHostKeyChecking=no -r\
/var/lib/daisy/tecs/storage_auto_config\
%s:/home/tecs_install' % (host_ip,),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
error_msg = "scp /var/lib/daisy/tecs/storage_auto_config\
to %s failed!" % host_ip
raise exception.InstallException(error_msg)
try:
LOG.info(_("Config share disk for host %s" % host_ip))
cmd = "cd /home/tecs_install/storage_auto_config/;\
python storage_auto_config.py share_disk %s"\
% controller_ha_nodes[host_ip]
exc_result = subprocess.check_output(
'clush -S -w %s "%s"' % (host_ip,cmd),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
LOG.info(_("Storage script error message: %s" % e.output))
error_msg = "config Disk Array share disks\
on %s failed!" % host_ip
raise exception.InstallException(error_msg)
'''
cmd = 'rm -rf /var/lib/daisy/tecs/storage_auto_config/base/*.json'
daisy_cmn.subprocess_call(cmd)
for (host_ip, share_disk) in zip(sorted_ha_nodes_ip, all_share_disk_info):
with open("/var/lib/daisy/tecs/storage_auto_config/base/control.json",
"w") as fp:
json.dump(share_disk, fp, indent=2)
try:
subprocess.check_output(
'scp -o StrictHostKeyChecking=no -r\
/var/lib/daisy/tecs/storage_auto_config\
%s:/home/tecs_install' % (host_ip,),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
error_msg = "scp /var/lib/daisy/tecs/storage_auto_config\
to %s failed!" % host_ip
raise exception.InstallException(error_msg)
try:
LOG.info(_("Config share disk for host %s" % host_ip))
cmd = "cd /home/tecs_install/storage_auto_config/;\
python storage_auto_config.py share_disk %s"\
% controller_ha_nodes[host_ip]
subprocess.check_output(
'clush -S -w %s "%s"' % (host_ip, cmd),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
LOG.info(_("Storage script error message: %s" % e.output))
error_msg = "config Disk Array share disks\
on %s failed!" % host_ip
raise exception.InstallException(error_msg)
def config_ha_cinder_volume(volume_disk_info, controller_ha_ips):
cmd = 'rm -rf /var/lib/daisy/tecs/storage_auto_config/base/*.json'
daisy_cmn.subprocess_call(cmd)
with open("/var/lib/daisy/tecs/storage_auto_config/base/cinder.json",
"w") as fp:
json.dump(volume_disk_info, fp, indent=2)
for host_ip in controller_ha_ips:
try:
subprocess.check_output(
'scp -o StrictHostKeyChecking=no -r\
/var/lib/daisy/tecs/storage_auto_config\
%s:/home/tecs_install' % (host_ip,),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
error_msg = "scp /var/lib/daisy/tecs/storage_auto_config\
to %s failed!" % host_ip
raise exception.InstallException(error_msg)
try:
LOG.info(_("Config cinder volume for host %s" % host_ip))
cmd = 'cd /home/tecs_install/storage_auto_config/;\
python storage_auto_config.py cinder_conf %s' % host_ip
subprocess.check_output(
'clush -S -w %s "%s"' % (host_ip, cmd),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
LOG.info(_("Storage script error message: %s" % e.output))
error_msg = "config Disk Array cinder volumes\
on %s failed!" % host_ip
raise exception.InstallException(error_msg)
def config_compute_multipath(hosts_ip):
for host_ip in hosts_ip:
try:
subprocess.check_output(
'scp -o StrictHostKeyChecking=no -r\
/var/lib/daisy/tecs/storage_auto_config\
%s:/home/tecs_install' % (host_ip,),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
error_msg = "scp /var/lib/daisy/tecs/storage_auto_config\
to %s failed!" % host_ip
raise exception.InstallException(error_msg)
try:
LOG.info(_("Config multipath for host %s" % host_ip))
cmd = 'cd /home/tecs_install/storage_auto_config/;\
python storage_auto_config.py check_multipath'
subprocess.check_output(
'clush -S -w %s "%s"' % (host_ip, cmd),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
LOG.info(_("Storage script error message: %s" % e.output))
error_msg = "config Disk Array multipath\
on %s failed!" % host_ip
raise exception.InstallException(error_msg)

File diff suppressed because it is too large

View File

@ -1,150 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/hosts endpoint for Daisy v1 API
"""
import subprocess
from oslo_log import log as logging
from daisy import i18n
import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.tecs.common as tecs_cmn
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
tecs_state = tecs_cmn.TECS_STATE
def update_progress_to_db(req, role_id_list, status, hosts_list, host_ip=None):
"""
Write uninstall progress and status to db;
we use the global lock object 'uninstall_mutex'
to make sure this function is thread safe.
:param req: http req.
:param role_id_list: Role ids whose rows need updating in the role table.
:param status: Uninstall status.
:return:
"""
for role_id in role_id_list:
role_hosts = daisy_cmn.get_hosts_of_role(req, role_id)
for host_id_ip in hosts_list:
host_ip_tmp = host_id_ip.values()[0]
host_id_tmp = host_id_ip.keys()[0]
if host_ip:
for role_host in role_hosts:
if (host_ip_tmp == host_ip and
role_host['host_id'] == host_id_tmp):
role_host_meta = {}
if 0 == cmp(status, tecs_state['UNINSTALLING']):
role_host_meta['progress'] = 10
role_host_meta['messages'] = 'TECS uninstalling'
if 0 == cmp(status, tecs_state['UNINSTALL_FAILED']):
role_host_meta[
'messages'] = 'TECS uninstall failed'
elif 0 == cmp(status, tecs_state['INIT']):
role_host_meta['progress'] = 100
role_host_meta[
'messages'] = 'TECS uninstalled successfully'
if role_host_meta:
role_host_meta['status'] = status
daisy_cmn.update_role_host(req, role_host['id'],
role_host_meta)
else:
role = {}
if 0 == cmp(status, tecs_state['UNINSTALLING']):
for role_host in role_hosts:
role_host_meta = {}
role_host_meta['status'] = status
role_host_meta['progress'] = 0
daisy_cmn.update_role_host(req,
role_host['id'],
role_host_meta)
role['progress'] = 0
role['messages'] = 'TECS uninstalling'
if 0 == cmp(status, tecs_state['UNINSTALL_FAILED']):
role['messages'] = 'TECS uninstall failed'
elif 0 == cmp(status, tecs_state['INIT']):
role['progress'] = 100
role['messages'] = 'TECS uninstalled successfully'
if role:
role['status'] = status
daisy_cmn.update_role(req, role_id, role)
def _thread_bin(req, host_ip, role_id_list, hosts_list):
# uninstall network-configuration-1.1.1-15.x86_64.rpm
update_progress_to_db(
req, role_id_list, tecs_state['UNINSTALLING'], hosts_list, host_ip)
tecs_cmn.TecsShellExector(host_ip, 'uninstall_rpm')
cmd = 'mkdir -p /var/log/daisy/daisy_uninstall/'
daisy_cmn.subprocess_call(cmd)
password = "ossdbg1"
var_log_path = "/var/log/daisy/daisy_uninstall/\
%s_uninstall_tecs.log" % host_ip
with open(var_log_path, "w+") as fp:
cmd = '/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, password)
daisy_cmn.subprocess_call(cmd, fp)
cmd = 'clush -S -b -w %s "rm -rf /home/daisy_uninstall"' % (host_ip,)
daisy_cmn.subprocess_call(cmd, fp)
cmd = 'clush -S -w %s "mkdir -p /home/daisy_uninstall"' % (host_ip,)
daisy_cmn.subprocess_call(cmd, fp)
try:
subprocess.check_output(
'clush -S -w %s -c /var/lib/daisy/tecs/ZXTECS*.bin \
--dest=/home/daisy_uninstall' % (
host_ip,),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
update_progress_to_db(
req, role_id_list, tecs_state[
'UNINSTALL_FAILED'], hosts_list, host_ip)
LOG.error(_("scp TECS bin for %s failed!" % host_ip))
fp.write(e.output.strip())
cmd = 'clush -S -w %s "chmod 777 /home/daisy_uninstall/*"' % (host_ip,)
daisy_cmn.subprocess_call(cmd, fp)
try:
exc_result = subprocess.check_output(
'clush -S -w %s /home/daisy_uninstall/ZXTECS*.bin clean' % (
host_ip,),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
update_progress_to_db(
req, role_id_list, tecs_state[
'UNINSTALL_FAILED'], hosts_list, host_ip)
LOG.error(_("Uninstall TECS for %s failed!" % host_ip))
fp.write(e.output.strip())
else:
update_progress_to_db(req, role_id_list, tecs_state['INIT'],
hosts_list, host_ip)
LOG.info(_("Uninstall TECS for %s successfully!" % host_ip))
fp.write(exc_result)
# this will raise all the exceptions of the thread to the log file
def thread_bin(req, host_ip, role_id_list, hosts_list):
try:
_thread_bin(req, host_ip, role_id_list, hosts_list)
except Exception as e:
LOG.exception(e.message)

View File

@ -1,145 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/update endpoint for Daisy v1 API
"""
import subprocess
from oslo_log import log as logging
from daisy import i18n
import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.tecs.common as tecs_cmn
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
tecs_state = tecs_cmn.TECS_STATE
def update_progress_to_db(req, role_id_list, status, hosts_list, host_ip=None):
"""
Write update progress and status to db,
making sure this function is thread safe.
:param req: http req.
:param role_id_list: Role ids whose rows need updating in the role table.
:param status: Update status.
:return:
"""
for role_id in role_id_list:
role_hosts = daisy_cmn.get_hosts_of_role(req, role_id)
for host_id_ip in hosts_list:
host_ip_tmp = host_id_ip.values()[0]
host_id_tmp = host_id_ip.keys()[0]
if host_ip:
for role_host in role_hosts:
if (host_ip_tmp == host_ip and
role_host['host_id'] == host_id_tmp):
role_host_meta = {}
if 0 == cmp(status, tecs_state['UPDATING']):
role_host_meta['progress'] = 10
role_host_meta['messages'] = 'TECS upgrading'
if 0 == cmp(status, tecs_state['UPDATE_FAILED']):
role_host_meta['messages'] = 'TECS upgrade failed'
elif 0 == cmp(status, tecs_state['ACTIVE']):
role_host_meta['progress'] = 100
role_host_meta[
'messages'] = 'TECS upgraded successfully'
if role_host_meta:
role_host_meta['status'] = status
daisy_cmn.update_role_host(req,
role_host['id'],
role_host_meta)
else:
role = {}
if 0 == cmp(status, tecs_state['UPDATING']):
for role_host in role_hosts:
if role_host['status'] == tecs_state['INSTALL_FAILED']:
continue
role_host_meta = {}
role_host_meta['status'] = status
role_host_meta['progress'] = 0
role_host_meta['messages'] = 'TECS upgrading'
daisy_cmn.update_role_host(req,
role_host['id'],
role_host_meta)
role['progress'] = 0
role['messages'] = 'TECS upgrading'
if 0 == cmp(status, tecs_state['UPDATE_FAILED']):
role['messages'] = 'TECS upgrade failed'
elif 0 == cmp(status, tecs_state['ACTIVE']):
role['progress'] = 100
role['messages'] = 'TECS upgraded successfully'
if role:
role['status'] = status
daisy_cmn.update_role(req, role_id, role)
def thread_bin(req, role_id_list, host_ip, hosts_list):
# update network-configuration-1.1.1-15.x86_64.rpm
update_progress_to_db(
req, role_id_list, tecs_state['UPDATING'], hosts_list, host_ip)
cmd = 'mkdir -p /var/log/daisy/daisy_update/'
daisy_cmn.subprocess_call(cmd)
password = "ossdbg1"
var_log_path = "/var/log/daisy/daisy_update/%s_update_tecs.log" % host_ip
with open(var_log_path, "w+") as fp:
cmd = '/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, password)
daisy_cmn.subprocess_call(cmd, fp)
cmd = 'clush -S -w %s "mkdir -p /home/tecs_update/"' % (host_ip,)
daisy_cmn.subprocess_call(cmd, fp)
cmd = 'clush -S -b -w %s "rm -rf /home/tecs_update/ZXTECS*.bin"' % (
host_ip,)
daisy_cmn.subprocess_call(cmd, fp)
tecs_cmn.TecsShellExector(host_ip, 'update_rpm')
try:
subprocess.check_output(
'clush -S -w %s -c /var/lib/daisy/tecs/ZXTECS*.bin \
--dest=/home/tecs_update' % (
host_ip,),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
update_progress_to_db(
req, role_id_list, tecs_state[
'UPDATE_FAILED'], hosts_list, host_ip)
LOG.error(_("scp TECS bin for %s failed!" % host_ip))
fp.write(e.output.strip())
return 1
cmd = 'clush -S -w %s "chmod 777 /home/tecs_update/*"' % (host_ip,)
daisy_cmn.subprocess_call(cmd, fp)
try:
exc_result = subprocess.check_output(
'clush -S -w %s "/home/tecs_update/ZXTECS*.bin upgrade"' % (
host_ip,),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
update_progress_to_db(
req, role_id_list, tecs_state[
'UPDATE_FAILED'], hosts_list, host_ip)
LOG.error(_("Update TECS for %s failed!" % host_ip))
fp.write(e.output.strip())
return 2
else:
update_progress_to_db(
req, role_id_list, tecs_state['ACTIVE'], hosts_list, host_ip)
fp.write(exc_result)
return 0

View File

@ -1,142 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/install endpoint for tecs API
"""
import daisy.registry.client.v1.api as registry
import daisy.api.backends.common as daisy_cmn
from daisy.common import utils
def _write_role_configs_to_db(req, cluster_id, role_name, configs):
config_meta = {'cluster': cluster_id,
'role': role_name,
'config': configs}
registry.config_interface_metadata(req.context,
config_meta)
def _write_host_configs_to_db(req, host_id, configs):
config_meta = {'host_id': host_id,
'config': configs}
registry.config_interface_metadata(req.context,
config_meta)
def _get_config_item(file, section, key, value, description):
return {'file-name': file,
'section': section,
'key': key,
'value': value,
'description': description}
def _add_configs_for_nova(req, host_detail):
config_file = '/etc/nova/nova.conf'
default_section = 'DEFAULT'
key_name = 'vcpu_pin_set'
key_value = host_detail.get(key_name)
config_items = []
if not key_value:
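# fall back to the host's isolated-cpu list when no explicit
# vcpu_pin_set was supplied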
key_value = host_detail.get('isolcpus')
nova_key_name = key_name
description = 'vcpu pin set for all vm'
item = _get_config_item(config_file,
default_section,
nova_key_name,
key_value,
description)
config_items.append(item)
key_name = 'dvs_high_cpuset'
key_value = host_detail.get(key_name)
nova_key_name = 'dvs_high_cpu_set'
description = 'vcpu pin set for high-performance dvs vm'
item = _get_config_item(config_file,
default_section,
nova_key_name,
key_value,
description)
config_items.append(item)
numa_cpus = utils.get_numa_node_cpus(host_detail.get('cpu', {}))
numa_nodes = utils.get_numa_node_from_cpus(numa_cpus, key_value)
if numa_nodes:
libvirt_section = 'libvirt'
nova_key_name = 'reserved_huge_pages'
# only support one NUMA node for DVS now
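# nova's reserved_huge_pages format is node:<numa-id>,size:<kB>,count:<n>;
# size 1048576 kB is a 1 GiB page, so this reserves 4 x 1 GiB pages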
key_value = 'node:%s,size:1048576,count:4' % numa_nodes[0]
description = 'reserved hugepages for DVS service '\
'on high NUMA node'
config_items.append({'file-name': config_file,
'key': nova_key_name,
'section': libvirt_section,
'value': key_value,
'description': description})
key_name = 'pci_high_cpuset'
pci_key_value = host_detail.get(key_name)
nova_key_name = 'vsg_card_cpu_set'
description = 'vcpu pin set for high-performance CLC card vm'
item = _get_config_item(config_file,
default_section,
nova_key_name,
pci_key_value,
description)
config_items.append(item)
if pci_key_value:
nova_key_name = 'default_ephemeral_format'
description = 'config for CLC card'
key_value = 'ext3'
item = _get_config_item(config_file,
default_section,
nova_key_name,
key_value,
description)
config_items.append(item)
nova_key_name = 'pci_passthrough_whitelist'
description = 'config for CLC card'
key_value = '[{"vendor_id": "8086","product_id": "0435"}]'
item = _get_config_item(config_file,
default_section,
nova_key_name,
key_value,
description)
config_items.append(item)
_write_host_configs_to_db(req,
host_detail['id'],
config_items)
def update_configset(req, cluster_id):
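# Push per-host nova config items (cpu pinning, hugepages, CLC card
# settings) for every host that carries the COMPUTER role.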
roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id)
for role in roles:
# now only the COMPUTER role has configs
if role['name'] != 'COMPUTER':
continue
role_meta = {'config_set_update_progress': 0}
daisy_cmn.update_role(req, role['id'], role_meta)
role_hosts = daisy_cmn.get_hosts_of_role(req, role['id'])
for host in role_hosts:
host_detail = daisy_cmn.get_host_detail(req, host['host_id'])
_add_configs_for_nova(req, host_detail)

View File

@ -1,186 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/install endpoint for zenic API
"""
import time
from oslo_log import log as logging
import threading
from daisy import i18n
from daisy.common import exception
from daisy.api.backends import driver
import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.zenic.common as zenic_cmn
import daisy.api.backends.zenic.install as instl
import daisy.api.backends.zenic.uninstall as unstl
import daisy.api.backends.zenic.upgrade as upgrd
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
zenic_state = zenic_cmn.ZENIC_STATE
class API(driver.DeploymentDriver):
def __init__(self):
super(API, self).__init__()
return
def install(self, req, cluster_id):
"""
Install zenic to a cluster.
param req: The WSGI/Webob Request object
:param cluster_id: cluster id
"""
# instl.pxe_server_build(req, install_meta)
# get hosts config which need to install OS
# hosts_need_os = instl.get_cluster_hosts_config(req, cluster_id)
# if there are hosts that need os installed, ZENIC installation is executed
# in OSInstallTask
# if hosts_need_os:
# os_install_obj = instl.OSInstallTask(req, cluster_id, hosts_need_os)
# os_install_thread = Thread(target=os_install_obj.run)
# os_install_thread.start()
# else:
LOG.info(
_("No host need to install os, begin install ZENIC for cluster %s."
% cluster_id))
zenic_install_task = instl.ZENICInstallTask(req, cluster_id)
zenic_install_task.start()
LOG.info((_("begin install zenic, please waiting....")))
time.sleep(5)
LOG.info((_("install zenic successfully")))
def uninstall(self, req, cluster_id):
"""
Uninstall ZENIC from a cluster.
:param req: The WSGI/Webob Request object
:raises HTTPBadRequest if x-install-cluster is missing
"""
(role_id_list, hosts_list) = zenic_cmn.get_roles_and_hosts_list(
req, cluster_id)
if role_id_list:
if not hosts_list:
msg = _("there is no host in cluster %s") % cluster_id
raise exception.ThreadBinException(msg)
unstl.update_progress_to_db(
req, role_id_list, zenic_state['UNINSTALLING'], 0.0)
uninstall_progress_percentage =\
round(1 * 1.0 / len(hosts_list), 2) * 100
threads = []
for host in hosts_list:
t = threading.Thread(target=unstl.thread_bin, args=(
req, host, role_id_list, uninstall_progress_percentage))
t.setDaemon(True)
t.start()
threads.append(t)
LOG.info(_("uninstall threads have started, please waiting...."))
try:
for t in threads:
t.join()
except:
LOG.warn(_("Join uninstall thread %s failed!" % t))
else:
uninstall_failed_flag = False
for role_id in role_id_list:
role = daisy_cmn.get_role_detail(req, role_id)
if role['progress'] == 100:
unstl.update_progress_to_db(
req, role_id_list, zenic_state['UNINSTALL_FAILED'])
uninstall_failed_flag = True
break
if role['status'] == zenic_state['UNINSTALL_FAILED']:
uninstall_failed_flag = True
break
if not uninstall_failed_flag:
LOG.info(
_("all uninstall threads have done,\
set all roles status to 'init'!"))
unstl.update_progress_to_db(
req, role_id_list, zenic_state['INIT'])
LOG.info((_("begin uninstall zenic, please waiting....")))
time.sleep(5)
LOG.info((_("uninstall zenic successfully")))
def upgrade(self, req, cluster_id):
"""
Update zenic for a cluster.
:param req: The WSGI/Webob Request object
:raises HTTPBadRequest if x-install-cluster is missing
"""
(role_id_list, hosts_list) = zenic_cmn.get_roles_and_hosts_list(
req, cluster_id)
if not hosts_list:
msg = _("there is no host in cluster %s") % cluster_id
raise exception.ThreadBinException(msg)
upgrd.update_progress_to_db(
req, role_id_list, zenic_state['UPDATING'], 0.0)
update_progress_percentage = round(1 * 1.0 / len(hosts_list), 2) * 100
threads = []
for host in hosts_list:
t = threading.Thread(target=upgrd.thread_bin, args=(
req, host, role_id_list, update_progress_percentage))
t.setDaemon(True)
t.start()
threads.append(t)
LOG.info(_("upgrade threads have started, please waiting...."))
try:
for t in threads:
t.join()
except:
LOG.warn(_("Join upgrade thread %s failed!" % t))
else:
update_failed_flag = False
for role_id in role_id_list:
role = daisy_cmn.get_role_detail(req, role_id)
if role['progress'] == 0:
upgrd.update_progress_to_db(
req, role_id_list, zenic_state['UPDATE_FAILED'])
update_failed_flag = True
break
if role['status'] == zenic_state['UPDATE_FAILED']:
update_failed_flag = True
break
if not update_failed_flag:
LOG.info(
_("all update threads have done, \
set all roles status to 'active'!"))
upgrd.update_progress_to_db(
req, role_id_list, zenic_state['ACTIVE'])

View File

@ -1,319 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/install endpoint for zenic API
"""
import os
import copy
import subprocess
from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from daisy import i18n
from daisy.common import exception
import daisy.registry.client.v1.api as registry
import daisy.api.backends.common as daisy_cmn
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
daisy_zenic_path = '/var/lib/daisy/zenic/'
ZENIC_STATE = {
'INIT': 'init',
'INSTALLING': 'installing',
'ACTIVE': 'active',
'INSTALL_FAILED': 'install-failed',
'UNINSTALLING': 'uninstalling',
'UNINSTALL_FAILED': 'uninstall-failed',
'UPDATING': 'updating',
'UPDATE_FAILED': 'update-failed',
}
def get_cluster_hosts(req, cluster_id):
try:
cluster_hosts = registry.get_cluster_hosts(req.context, cluster_id)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return cluster_hosts
def get_host_detail(req, host_id):
try:
host_detail = registry.get_host_metadata(req.context, host_id)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return host_detail
def get_roles_detail(req):
try:
roles = registry.get_roles_detail(req.context)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return roles
def get_hosts_of_role(req, role_id):
try:
hosts = registry.get_role_host_metadata(req.context, role_id)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return hosts
def get_role_detail(req, role_id):
try:
role = registry.get_role_metadata(req.context, role_id)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return role
def update_role(req, role_id, role_meta):
try:
registry.update_role_metadata(req.context, role_id, role_meta)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
def update_role_host(req, role_id, role_host):
try:
registry.update_role_host_metadata(req.context, role_id, role_host)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
def delete_role_hosts(req, role_id):
try:
registry.delete_role_host_metadata(req.context, role_id)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
def _get_cluster_network(cluster_networks, network_type):
network = [cn for cn in cluster_networks
if cn['name'] in network_type]
if not network or not network[0]:
msg = "network %s is not exist" % (network_type)
raise exception.InvalidNetworkConfig(msg)
else:
return network[0]
def get_host_interface_by_network(host_detail, network_type):
host_detail_info = copy.deepcopy(host_detail)
interface_list = [hi for hi in host_detail_info['interfaces']
for assigned_network in hi['assigned_networks']
if assigned_network and
network_type == assigned_network['name']]
interface = {}
if interface_list:
interface = interface_list[0]
if not interface:
msg = "network %s of host %s is not exist" % (
network_type, host_detail_info['id'])
raise exception.InvalidNetworkConfig(msg)
return interface
def get_host_network_ip(req, host_detail, cluster_networks, network_type):
interface_network_ip = ''
host_interface = get_host_interface_by_network(host_detail, network_type)
if host_interface:
network = _get_cluster_network(cluster_networks, network_type)
assigned_network = daisy_cmn.get_assigned_network(req,
host_interface['id'],
network['id'])
interface_network_ip = assigned_network['ip']
if not interface_network_ip:
msg = "%s network ip of host %s can't be empty" % (
network_type, host_detail['id'])
raise exception.InvalidNetworkConfig(msg)
return interface_network_ip
def get_deploy_node_cfg(req, host_detail, cluster_networks):
host_deploy_network = get_host_interface_by_network(
host_detail, 'DEPLOYMENT')
host_deploy_ip = get_host_network_ip(
req, host_detail, cluster_networks, 'DEPLOYMENT')
if not host_deploy_ip:
msg = "deployment ip of host %s can't be empty" % host_detail['id']
raise exception.InvalidNetworkConfig(msg)
host_deploy_macname = host_deploy_network['name']
if not host_deploy_macname:
msg = "deployment macname of host %s can't be empty" % host_detail[
'id']
raise exception.InvalidNetworkConfig(msg)
host_mgt_ip = get_host_network_ip(
req, host_detail, cluster_networks, 'MANAGEMENT')
if not host_mgt_ip:
msg = "management ip of host %s can't be empty" % host_detail['id']
raise exception.InvalidNetworkConfig(msg)
memmode = 'tiny'
host_memory = 0
# if host_detail.has_key('memory'):
if 'memory' in host_detail:
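# memory['total'] looks like "<kB> kB", so dividing by 1024 * 1024 yields
# GB; the GB figure selects the ZENIC memory mode tier below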
host_memory = (
int(host_detail['memory'][
'total'].strip().split()[0])) / (1024 * 1024)
if host_memory < 8:
memmode = 'tiny'
elif host_memory < 16:
memmode = 'small'
elif host_memory < 32:
memmode = 'medium'
else:
memmode = 'large'
deploy_node_cfg = {}
deploy_node_cfg.update({'hostid': host_detail['id']})
deploy_node_cfg.update({'hostname': host_detail['name']})
deploy_node_cfg.update({'nodeip': host_deploy_ip})
deploy_node_cfg.update({'MacName': host_deploy_macname})
deploy_node_cfg.update({'memmode': memmode})
deploy_node_cfg.update({'mgtip': host_mgt_ip})
return deploy_node_cfg
def get_roles_and_hosts_list(req, cluster_id):
roles_id_list = set()
hosts_id_list = set()
hosts_list = []
cluster_networks = daisy_cmn.get_cluster_networks_detail(req, cluster_id)
roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id)
for role in roles:
if role['deployment_backend'] != daisy_cmn.zenic_backend_name:
continue
role_hosts = daisy_cmn.get_hosts_of_role(req, role['id'])
if role_hosts:
for role_host in role_hosts:
if role_host['host_id'] not in hosts_id_list:
host = daisy_cmn.get_host_detail(req, role_host['host_id'])
host_ip = get_host_network_ip(
req, host, cluster_networks, 'MANAGEMENT')
hosts_id_list.add(host['id'])
host_cfg = {}
host_cfg['mgtip'] = host_ip
host_cfg['rootpwd'] = host['root_pwd']
hosts_list.append(host_cfg)
roles_id_list.add(role['id'])
return (roles_id_list, hosts_list)
def check_and_get_zenic_version(daisy_zenic_pkg_path):
zenic_version_pkg_file = ""
zenic_version_pkg_name = ""
get_zenic_version_pkg = "ls %s| grep ^ZENIC.*\.zip$" % daisy_zenic_pkg_path
obj = subprocess.Popen(get_zenic_version_pkg,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdoutput, erroutput) = obj.communicate()
if stdoutput:
zenic_version_pkg_name = stdoutput.split('\n')[0]
zenic_version_pkg_file = daisy_zenic_pkg_path + zenic_version_pkg_name
chmod_for_zenic_version = 'chmod +x %s' % zenic_version_pkg_file
daisy_cmn.subprocess_call(chmod_for_zenic_version)
return (zenic_version_pkg_file, zenic_version_pkg_name)
class ZenicShellExector():
"""
Class config task before install zenic bin.
"""
def __init__(self, mgt_ip, task_type, params={}):
self.task_type = task_type
self.mgt_ip = mgt_ip
self.params = params
self.clush_cmd = ""
self.PKG_NAME = self.params['pkg_name']
self.PKG_PATH = daisy_zenic_path + self.PKG_NAME
self.CFG_PATH = daisy_zenic_path + mgt_ip + "_zenic.conf"
self.oper_type = {
'install': self._install_pkg
}
self.oper_shell = {
'CMD_SSHPASS_PRE': "sshpass -p ossdbg1 %(ssh_ip)s %(cmd)s",
'CMD_CFG_SCP': "scp %(path)s root@%(ssh_ip)s:/etc/zenic/config" %
{'path': self.CFG_PATH, 'ssh_ip': mgt_ip},
'CMD_PKG_UNZIP': "unzip /home/workspace/%(pkg_name)s \
-d /home/workspace/PKG" % {'pkg_name': self.PKG_NAME},
'CMD_PKG_SCP': "scp %(path)s root@%(ssh_ip)s:/home/workspace/" %
{'path': self.PKG_PATH, 'ssh_ip': mgt_ip}
}
self._execute()
def _install_pkg(self):
if not os.path.exists(self.CFG_PATH):
LOG.error(_("<<<CFG %s not exist>>>" % self.CFG_PATH))
return
if not os.path.exists(self.PKG_PATH):
LOG.error(_("<<<PKG %s not exist>>>" % self.PKG_PATH))
return
self.clush_cmd = "%s;%s;%s" % \
(self.oper_shell['CMD_SSHPASS_PRE'] %
{"ssh_ip": "", "cmd": self.oper_shell['CMD_PKG_SCP']},
self.oper_shell['CMD_SSHPASS_PRE'] %
{"ssh_ip": "", "cmd": self.oper_shell['CMD_CFG_SCP']},
self.oper_shell['CMD_SSHPASS_PRE'] %
{"ssh_ip": "ssh " + self.mgt_ip, "cmd": self.oper_shell[
'CMD_PKG_UNZIP']})
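# The chained command copies the package and the generated config to the
# target, then unzips the package remotely. The empty ssh_ip makes sshpass
# run the two scp steps from the local machine.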
subprocess.check_output(
self.clush_cmd, shell=True, stderr=subprocess.STDOUT)
def _execute(self):
try:
if not self.task_type or not self.mgt_ip:
LOG.error(
_("<<<ZenicShellExector::execute, \
input params invalid!>>>"))
return
self.oper_type[self.task_type]()
except subprocess.CalledProcessError as e:
LOG.warn(
_("<<<ZenicShellExector::execute:Execute command failed! Reason\
:%s>>>" % e.output.strip()))
except Exception as e:
LOG.exception(_(e.message))
else:
LOG.info(
_("<<<ZenicShellExector::execute:Execute command:\
%s,successful!>>>" % self.clush_cmd))

View File

@ -1,130 +0,0 @@
# -*- coding: utf-8 -*-
import os
from ConfigParser import ConfigParser
default_zenic_conf_template_path = "/var/lib/daisy/zenic/"
zenic_conf_template_path = default_zenic_conf_template_path
def update_conf(zenic, key, value):
zenic.set("general", key, value)
def get_conf(zenic_conf_file, **kwargs):
result = {}
if not kwargs:
return result
zenic = ConfigParser()
zenic.optionxform = str
zenic.read(zenic_conf_file)
result = {key: zenic.get("general", kwargs.get(key, None))
for key in kwargs.keys()
if zenic.has_option("general", kwargs.get(key, None))}
return result
def get_nodeid(deploy_ip, zbp_ips):
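# The first ZBP ip maps to node id 1, the second to 256, and any later ip
# keeps its positional index; update_zenic_conf() below builds the matching
# '1,256,...' zbpnodelist.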
nodeid = 0
i = 0
for ip in zbp_ips:
if deploy_ip == ip:
break
else:
i = i + 1
if i == 0:
nodeid = 1
elif i == 1:
nodeid = 256
else:
nodeid = i
return nodeid
def update_zenic_conf(config_data, cluster_conf_path):
print "zenic config data is:"
import pprint
pprint.pprint(config_data)
daisy_zenic_path = zenic_conf_template_path
zenic_conf_template_file = os.path.join(daisy_zenic_path, "zenic.conf")
if not os.path.exists(cluster_conf_path):
os.makedirs(cluster_conf_path)
zenic = ConfigParser()
zenic.optionxform = str
zenic.read(zenic_conf_template_file)
zbpips = ''
for ip in config_data['zbp_ips']:
if not zbpips:
zbpips = ip
else:
zbpips = zbpips + ',' + ip
update_conf(zenic, 'zbpips', zbpips)
update_conf(zenic, 'zbp_node_num', config_data['zbp_node_num'])
nodelist = '1,256'
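# ids for the first two ZBP nodes are fixed at 1 and 256; extra nodes are
# appended by index (see get_nodeid above)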
if len(config_data['zbp_ips']) > 2:
for i in range(2, len(config_data['zbp_ips'])):
nodelist = nodelist + ',' + str(i)
update_conf(zenic, 'zbpnodelist', nodelist)
zampips = ''
for ip in config_data['zamp_ips']:
if not zampips:
zampips = ip
else:
zampips = zampips + ',' + ip
update_conf(zenic, 'zampips', zampips)
update_conf(zenic, 'zamp_node_num', config_data['zamp_node_num'])
mongodbips = ''
for ip in config_data['mongodb_ips']:
if not mongodbips:
mongodbips = ip
else:
mongodbips = mongodbips + ',' + ip
update_conf(zenic, 'mongodbips', mongodbips)
update_conf(zenic, 'mongodb_node_num', config_data['mongodb_node_num'])
update_conf(zenic, 'zamp_vip', config_data['zamp_vip'])
update_conf(zenic, 'mongodb_vip', config_data['mongodb_vip'])
deploy_hosts = config_data['deploy_hosts']
for deploy_host in deploy_hosts:
nodeip = deploy_host['nodeip']
hostname = deploy_host['hostname']
MacName = deploy_host['MacName']
memmode = deploy_host['memmode']
update_conf(zenic, 'nodeip', nodeip)
update_conf(zenic, 'hostname', hostname)
update_conf(zenic, 'MacName', MacName)
update_conf(zenic, 'memmode', memmode)
nodeid = get_nodeid(nodeip, config_data['zbp_ips'])
update_conf(zenic, 'nodeid', nodeid)
if nodeip in config_data['zamp_ips']:
update_conf(zenic, 'needzamp', 'y')
else:
update_conf(zenic, 'needzamp', 'n')
zenic_conf = "%s_zenic.conf" % deploy_host['mgtip']
zenic_conf_cluster_out = os.path.join(cluster_conf_path, zenic_conf)
zenic_conf_out = os.path.join(daisy_zenic_path, zenic_conf)
zenic.write(open(zenic_conf_cluster_out, "w+"))
with open(zenic_conf_cluster_out, 'r') as fr,\
open(zenic_conf_out, 'w') as fw:
for line in fr.readlines():
fw.write(line.replace(' ', ''))
return
def test():
print("Hello, world!")

View File

@ -1,469 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/install endpoint for zenic API
"""
import subprocess
import time
from oslo_config import cfg
from oslo_log import log as logging
import threading
from daisy import i18n
import daisy.api.v1
from daisy.common import exception
from daisy.api.backends.zenic import config
import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.zenic.common as zenic_cmn
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS
SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS
ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE
CONF = cfg.CONF
install_opts = [
cfg.IntOpt('max_parallel_os_number', default=10,
help='Maximum number of hosts install os at the same time.'),
]
CONF.register_opts(install_opts)
CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format')
CONF.import_opt('container_formats', 'daisy.common.config',
group='image_format')
CONF.import_opt('image_property_quota', 'daisy.common.config')
host_os_status = {
'INIT': 'init',
'INSTALLING': 'installing',
'ACTIVE': 'active',
'FAILED': 'install-failed'
}
zenic_state = zenic_cmn.ZENIC_STATE
daisy_zenic_path = zenic_cmn.daisy_zenic_path
install_zenic_progress = 0.0
install_mutex = threading.Lock()
def update_progress_to_db(req, role_id_list,
status, progress_percentage_step=0.0):
"""
Write install progress and status to db;
we use the global lock object 'install_mutex'
to make sure this function is thread safe.
:param req: http req.
:param role_id_list: Roles to be updated in the role table.
:param status: install status.
:return:
"""
global install_mutex
global install_zenic_progress
install_mutex.acquire(True)
install_zenic_progress += progress_percentage_step
role = {}
for role_id in role_id_list:
if 0 == cmp(status, zenic_state['INSTALLING']):
role['status'] = status
role['progress'] = install_zenic_progress
if 0 == cmp(status, zenic_state['INSTALL_FAILED']):
role['status'] = status
elif 0 == cmp(status, zenic_state['ACTIVE']):
role['status'] = status
role['progress'] = 100
daisy_cmn.update_role(req, role_id, role)
install_mutex.release()
def _ping_hosts_test(ips):
ping_cmd = 'fping'
for ip in set(ips):
ping_cmd = ping_cmd + ' ' + ip
obj = subprocess.Popen(
ping_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdoutput, erroutput) = obj.communicate()
_returncode = obj.returncode
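# fping exits 0 when every host answers and 1 when some are unreachable;
# any other exit code means the address list itself was invalid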
if _returncode == 0 or _returncode == 1:
ping_result = stdoutput.split('\n')
unreachable_hosts = [result.split(
)[0] for result in ping_result if result and
result.split()[2] != 'alive']
else:
msg = "ping failed beaceuse there is invlid ip in %s" % ips
raise exception.InvalidIP(msg)
return unreachable_hosts
def _check_ping_hosts(ping_ips, max_ping_times):
if not ping_ips:
LOG.info(_("no ip got for ping test"))
return ping_ips
ping_count = 0
time_step = 5
LOG.info(_("begin ping test for %s" % ','.join(ping_ips)))
while True:
if ping_count == 0:
ips = _ping_hosts_test(ping_ips)
else:
ips = _ping_hosts_test(ips)
ping_count += 1
if ips:
LOG.debug(
_("ping host %s for %s times" % (','.join(ips), ping_count)))
if ping_count >= max_ping_times:
LOG.info(_("ping host %s timeout for %ss" %
(','.join(ips), ping_count * time_step)))
return ips
time.sleep(time_step)
else:
LOG.info(_("ping host %s success" % ','.join(ping_ips)))
time.sleep(120)
LOG.info(_("120s after ping host %s success" % ','.join(ping_ips)))
return ips
def _get_host_private_networks(host_detail, cluster_private_networks_name):
host_private_networks = [hi for pn in cluster_private_networks_name
for hi in
host_detail['interfaces'] if pn in
hi['assigned_networks']]
# If port type is bond,use pci segment of member port replace pci1 & pci2
# segments of bond port
for interface_outer in host_private_networks:
if 0 != cmp(interface_outer.get('type', None), "bond"):
continue
slave1 = interface_outer.get('slave1', None)
slave2 = interface_outer.get('slave2', None)
if not slave1 or not slave2:
continue
interface_outer.pop('pci')
for interface_inner in host_detail['interfaces']:
if 0 == cmp(interface_inner.get('name', None), slave1):
interface_outer['pci1'] = interface_inner['pci']
elif 0 == cmp(interface_inner.get('name', None), slave2):
interface_outer['pci2'] = interface_inner['pci']
return host_private_networks
def get_cluster_zenic_config(req, cluster_id):
LOG.info(_("get zenic config from database..."))
# params = dict(limit=1000000)
zenic_config = {}
deploy_hosts = []
deploy_host_cfg = {}
mgt_ip = ''
zbp_ip_list = set()
mgt_ip_list = set()
zamp_ip_list = set()
zamp_vip = ''
mongodb_ip_list = set()
mongodb_vip = ''
cluster_networks = daisy_cmn.get_cluster_networks_detail(req, cluster_id)
all_roles = zenic_cmn.get_roles_detail(req)
roles = [role for role in all_roles if (role['cluster_id'] ==
cluster_id and role[
'deployment_backend'] ==
daisy_cmn.zenic_backend_name)]
for role in roles:
if not (role['name'] == 'ZENIC_CTL' or role['name'] == 'ZENIC_NFM'):
continue
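# ZENIC_CTL hosts supply the zbp ip list; ZENIC_NFM hosts supply the zamp
# and mongodb ip lists plus their vips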
if role['name'] == 'ZENIC_NFM':
if not zamp_vip:
zamp_vip = role['vip']
if not mongodb_vip:
mongodb_vip = role['mongodb_vip']
role_hosts = zenic_cmn.get_hosts_of_role(req, role['id'])
for role_host in role_hosts:
mgt_ip = ''
for deploy_host in deploy_hosts:
if role_host['host_id'] == deploy_host['hostid']:
mgt_ip = deploy_host['mgtip']
deploy_ip = deploy_host['nodeip']
break
if not mgt_ip:
host_detail = zenic_cmn.get_host_detail(
req, role_host['host_id'])
deploy_host_cfg = zenic_cmn.get_deploy_node_cfg(
req, host_detail, cluster_networks)
deploy_hosts.append(deploy_host_cfg)
mgt_ip = deploy_host_cfg['mgtip']
deploy_ip = deploy_host_cfg['nodeip']
mgt_ip_list.add(mgt_ip)
if role['name'] == 'ZENIC_CTL':
zbp_ip_list.add(deploy_ip)
elif role['name'] == 'ZENIC_NFM':
zamp_ip_list.add(deploy_ip)
mongodb_ip_list.add(deploy_ip)
else:
LOG.warn(
_("<<<Zenic Install role %s is invalid >>>"
% role['name']))
zenic_config.update({'deploy_hosts': deploy_hosts})
zenic_config.update({'zbp_ips': zbp_ip_list})
zenic_config.update({'zbp_node_num': len(zbp_ip_list)})
zenic_config.update({'zamp_ips': zamp_ip_list})
zenic_config.update({'zamp_node_num': len(zamp_ip_list)})
zenic_config.update({'mongodb_ips': mongodb_ip_list})
zenic_config.update({'mongodb_node_num': len(mongodb_ip_list)})
zenic_config.update({'zamp_vip': zamp_vip})
zenic_config.update({'mongodb_vip': mongodb_vip})
return (zenic_config, mgt_ip_list)
def generate_zenic_config_file(cluster_id, zenic_config):
LOG.info(_("generate zenic config..."))
if zenic_config:
cluster_conf_path = daisy_zenic_path + cluster_id
config.update_zenic_conf(zenic_config, cluster_conf_path)
def thread_bin(req, host, role_id_list, pkg_name, install_progress_percentage):
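# Per-host install worker: trust the host, clean its workspace, scp the
# generated config and the version package over, unzip, then run
# node_install.sh and node_start.sh.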
host_ip = host['mgtip']
password = host['rootpwd']
cmd = 'mkdir -p /var/log/daisy/daisy_install/'
daisy_cmn.subprocess_call(cmd)
var_log_path =\
"/var/log/daisy/daisy_install/%s_install_zenic.log" % host_ip
with open(var_log_path, "w+") as fp:
cmd = '/var/lib/daisy/zenic/trustme.sh %s %s' % (host_ip, password)
daisy_cmn.subprocess_call(cmd, fp)
cmd = 'clush -S -b -w %s mkdir -p /home/workspace' % (host_ip,)
daisy_cmn.subprocess_call(cmd, fp)
cmd = 'clush -S -b -w %s mkdir -p /etc/zenic' % (host_ip,)
daisy_cmn.subprocess_call(cmd, fp)
cmd = 'clush -S -b -w %s rm -rf /etc/zenic/config' % (host_ip,)
daisy_cmn.subprocess_call(cmd, fp)
cmd = 'clush -S -b -w %s rm -rf /home/zenic' % (host_ip,)
daisy_cmn.subprocess_call(cmd, fp)
cmd = 'clush -S -b -w %s rm -rf /home/workspace/unipack' % (host_ip,)
daisy_cmn.subprocess_call(cmd, fp)
pkg_file = daisy_zenic_path + pkg_name
cmd = 'clush -S -b -w %s rm -rf /home/workspace/%s' % (
host_ip, pkg_name)
daisy_cmn.subprocess_call(cmd, fp)
cfg_file = daisy_zenic_path + host_ip + "_zenic.conf"
try:
exc_result = subprocess.check_output(
'sshpass -p ossdbg1 scp %s root@%s:/etc/zenic/config' % (
cfg_file, host_ip,),
shell=True, stderr=fp)
except subprocess.CalledProcessError as e:
update_progress_to_db(
req, role_id_list, zenic_state['INSTALL_FAILED'])
LOG.info(_("scp zenic pkg for %s failed!" % host_ip))
fp.write(e.output.strip())
exit()
else:
LOG.info(_("scp zenic config for %s successfully!" % host_ip))
fp.write(exc_result)
try:
exc_result = subprocess.check_output(
'sshpass -p ossdbg1 scp %s root@%s:/home/workspace/' % (
pkg_file, host_ip,),
shell=True, stderr=fp)
except subprocess.CalledProcessError as e:
update_progress_to_db(
req, role_id_list, zenic_state['INSTALL_FAILED'])
LOG.info(_("scp zenic pkg for %s failed!" % host_ip))
fp.write(e.output.strip())
exit()
else:
LOG.info(_("scp zenic pkg for %s successfully!" % host_ip))
fp.write(exc_result)
cmd = 'clush -S -b -w %s unzip /home/workspace/%s \
-d /home/workspace/unipack' % (
host_ip, pkg_name,)
daisy_cmn.subprocess_call(cmd)
try:
exc_result = subprocess.check_output(
'clush -S -b -w %s /home/workspace/unipack/node_install.sh'
% (host_ip,),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
update_progress_to_db(
req, role_id_list, zenic_state['INSTALL_FAILED'])
LOG.info(_("install zenic for %s failed!" % host_ip))
fp.write(e.output.strip())
exit()
else:
LOG.info(_("install zenic for %s successfully!" % host_ip))
fp.write(exc_result)
try:
exc_result = subprocess.check_output(
'clush -S -b -w %s /home/zenic/node_start.sh' % (host_ip,),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
update_progress_to_db(
req, role_id_list, zenic_state['INSTALL_FAILED'])
LOG.info(_("start zenic for %s failed!" % host_ip))
fp.write(e.output.strip())
exit()
else:
update_progress_to_db(
req, role_id_list, zenic_state['INSTALLING'],
install_progress_percentage)
LOG.info(_("start zenic for %s successfully!" % host_ip))
fp.write(exc_result)
class ZENICInstallTask(threading.Thread):
"""
Class for installing the zenic bin.
"""
""" Definition for install states."""
INSTALL_STATES = {
'INIT': 'init',
'INSTALLING': 'installing',
'ACTIVE': 'active',
'FAILED': 'install-failed'
}
def __init__(self, req, cluster_id):
super(ZENICInstallTask, self).__init__()
self.req = req
self.cluster_id = cluster_id
self.progress = 0
self.state = ZENICInstallTask.INSTALL_STATES['INIT']
self.message = ""
self.zenic_config_file = ''
self.mgt_ip_list = ''
self.install_log_fp = None
self.last_line_num = 0
self.need_install = False
self.ping_times = 36
self.log_file = "/var/log/daisy/zenic_%s_install.log" % self.cluster_id
def run(self):
try:
self._run()
except (exception.InstallException,
exception.NotFound,
exception.InstallTimeoutException) as e:
LOG.exception(e.message)
else:
if not self.need_install:
return
self.progress = 100
self.state = zenic_state['ACTIVE']
self.message = "Zenic install successfully"
LOG.info(_("install Zenic for cluster %s successfully."
% self.cluster_id))
def _run(self):
(zenic_config, self.mgt_ip_list) = get_cluster_zenic_config(
self.req, self.cluster_id)
if not self.mgt_ip_list:
msg = _("there is no host in cluster %s") % self.cluster_id
raise exception.ThreadBinException(msg)
unreached_hosts = _check_ping_hosts(self.mgt_ip_list, self.ping_times)
if unreached_hosts:
self.state = zenic_state['INSTALL_FAILED']
self.message = "hosts %s ping failed" % unreached_hosts
raise exception.NotFound(message=self.message)
generate_zenic_config_file(self.cluster_id, zenic_config)
# check and get ZENIC version
(zenic_version_pkg_file, zenic_version_pkg_name) =\
zenic_cmn.check_and_get_zenic_version(
daisy_zenic_path)
if not zenic_version_pkg_file:
self.state = zenic_state['INSTALL_FAILED']
self.message = \
"ZENIC version file not found in %s" % daisy_zenic_path
raise exception.NotFound(message=self.message)
(role_id_list, hosts_list) = zenic_cmn.get_roles_and_hosts_list(
self.req, self.cluster_id)
update_progress_to_db(
self.req, role_id_list, zenic_state['INSTALLING'], 0.0)
install_progress_percentage = round(1 * 1.0 / len(hosts_list), 2) * 100
threads = []
for host in hosts_list:
t = threading.Thread(target=thread_bin, args=(
self.req, host, role_id_list,
zenic_version_pkg_name, install_progress_percentage))
t.setDaemon(True)
t.start()
threads.append(t)
LOG.info(_("install threads have started, please waiting...."))
try:
for t in threads:
t.join()
except:
LOG.warn(_("Join install thread %s failed!" % t))
else:
install_failed_flag = False
for role_id in role_id_list:
role = daisy_cmn.get_role_detail(self.req, role_id)
if role['progress'] == 0:
update_progress_to_db(
self.req, role_id_list, zenic_state['INSTALL_FAILED'])
install_failed_flag = True
break
if role['status'] == zenic_state['INSTALL_FAILED']:
install_failed_flag = True
break
if not install_failed_flag:
LOG.info(
_("all install threads have done, \
set all roles status to 'active'!"))
update_progress_to_db(
self.req, role_id_list, zenic_state['ACTIVE'])

View File

@ -1,94 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/hosts endpoint for Daisy v1 API
"""
import subprocess
from oslo_log import log as logging
import threading
from daisy import i18n
import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.zenic.common as zenic_cmn
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
zenic_state = zenic_cmn.ZENIC_STATE
uninstall_zenic_progress = 100.0
uninstall_mutex = threading.Lock()
def update_progress_to_db(req, role_id_list, status,
progress_percentage_step=0.0):
"""
Write uninstall progress and status to db;
we use the global lock object 'uninstall_mutex'
to make sure this function is thread safe.
:param req: http req.
:param role_id_list: Roles to be updated in the role table.
:param status: Uninstall status.
:return:
"""
global uninstall_mutex
global uninstall_zenic_progress
uninstall_mutex.acquire(True)
uninstall_zenic_progress -= progress_percentage_step
role = {}
for role_id in role_id_list:
if 0 == cmp(status, zenic_state['UNINSTALLING']):
role['status'] = status
role['progress'] = uninstall_zenic_progress
if 0 == cmp(status, zenic_state['UNINSTALL_FAILED']):
role['status'] = status
elif 0 == cmp(status, zenic_state['INIT']):
role['status'] = status
role['progress'] = 0
daisy_cmn.update_role(req, role_id, role)
uninstall_mutex.release()
def thread_bin(req, host, role_id_list, uninstall_progress_percentage):
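# Per-host uninstall worker: stops zenic on the host via node_stop.sh and
# records uninstall progress.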
host_ip = host['mgtip']
password = host['rootpwd']
cmd = 'mkdir -p /var/log/daisy/daisy_uninstall/'
daisy_cmn.subprocess_call(cmd)
var_log_path =\
"/var/log/daisy/daisy_uninstall/%s_uninstall_zenic.log" % host_ip
with open(var_log_path, "w+") as fp:
cmd = '/var/lib/daisy/zenic/trustme.sh %s %s' % (host_ip, password)
daisy_cmn.subprocess_call(cmd, fp)
try:
exc_result = subprocess.check_output(
'clush -S -b -w %s /home/zenic/node_stop.sh' % (host_ip,),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
update_progress_to_db(
req, role_id_list, zenic_state['UNINSTALL_FAILED'])
fp.write(e.output.strip())
else:
update_progress_to_db(
req, role_id_list, zenic_state['UNINSTALLING'],
uninstall_progress_percentage)
fp.write(exc_result)

View File

@ -1,155 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/update endpoint for Daisy v1 API
"""
import subprocess
from oslo_log import log as logging
import threading
from daisy import i18n
from daisy.common import exception
import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.zenic.common as zenic_cmn
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
zenic_state = zenic_cmn.ZENIC_STATE
daisy_zenic_path = zenic_cmn.daisy_zenic_path
update_zenic_progress = 0.0
update_mutex = threading.Lock()
def update_progress_to_db(req, role_id_list, status,
progress_percentage_step=0.0):
"""
Write update progress and status to db;
we use the global lock object 'update_mutex'
to make sure this function is thread safe.
:param req: http req.
:param role_id_list: Roles to be updated in the role table.
:param status: Update status.
:return:
"""
global update_mutex
global update_zenic_progress
update_mutex.acquire(True)
update_zenic_progress += progress_percentage_step
role = {}
for role_id in role_id_list:
if 0 == cmp(status, zenic_state['UPDATING']):
role['status'] = status
role['progress'] = update_zenic_progress
if 0 == cmp(status, zenic_state['UPDATE_FAILED']):
role['status'] = status
elif 0 == cmp(status, zenic_state['ACTIVE']):
role['status'] = status
role['progress'] = 100
daisy_cmn.update_role(req, role_id, role)
update_mutex.release()
def thread_bin(req, host, role_id_list, update_progress_percentage):
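# Per-host upgrade worker: stop the node, scp the new version package,
# unzip it, run node_upgrade.sh, then restart via node_start.sh.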
(zenic_version_pkg_file, zenic_version_pkg_name) = \
zenic_cmn.check_and_get_zenic_version(
daisy_zenic_path)
if not zenic_version_pkg_file:
# selfstate = zenic_state['INSTALL_FAILED']
selfmessage = "ZENIC version file not found in %s" % daisy_zenic_path
raise exception.NotFound(message=selfmessage)
host_ip = host['mgtip']
password = host['rootpwd']
cmd = 'mkdir -p /var/log/daisy/daisy_upgrade/'
daisy_cmn.subprocess_call(cmd)
var_log_path = \
"/var/log/daisy/daisy_upgrade/%s_upgrade_zenic.log" % host_ip
with open(var_log_path, "w+") as fp:
cmd = '/var/lib/daisy/zenic/trustme.sh %s %s' % (host_ip, password)
daisy_cmn.subprocess_call(cmd, fp)
cmd = 'clush -S -b -w %s /home/zenic/node_stop.sh' % (host_ip,)
daisy_cmn.subprocess_call(cmd, fp)
cmd = 'clush -S -b -w %s rm -rf /home/workspace/%s' % (
host_ip, zenic_version_pkg_name)
daisy_cmn.subprocess_call(cmd, fp)
cmd = 'clush -S -b -w %s rm -rf /home/workspace/unipack' % (host_ip,)
daisy_cmn.subprocess_call(cmd, fp)
try:
exc_result = subprocess.check_output(
'sshpass -p ossdbg1 scp %s root@%s:/home/workspace/' % (
zenic_version_pkg_file, host_ip,),
shell=True, stderr=fp)
except subprocess.CalledProcessError as e:
update_progress_to_db(
req, role_id_list, zenic_state['UPDATE_FAILED'])
LOG.info(_("scp zenic pkg for %s failed!" % host_ip))
fp.write(e.output.strip())
exit()
else:
LOG.info(_("scp zenic pkg for %s successfully!" % host_ip))
fp.write(exc_result)
cmd = 'clush -S -b -w %s unzip /home/workspace/%s \
-d /home/workspace/unipack' % (host_ip, zenic_version_pkg_name,)
daisy_cmn.subprocess_call(cmd)
try:
exc_result = subprocess.check_output(
'clush -S -b -w %s /home/workspace/unipack/node_upgrade.sh'
% (host_ip,),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
update_progress_to_db(
req, role_id_list, zenic_state['UPDATE_FAILED'])
LOG.info(_("Upgrade zenic for %s failed!" % host_ip))
fp.write(e.output.strip())
else:
update_progress_to_db(
req, role_id_list, zenic_state['UPDATING'],
update_progress_percentage)
LOG.info(_("Upgrade zenic for %s successfully!" % host_ip))
fp.write(exc_result)
try:
exc_result = subprocess.check_output(
'clush -S -b -w %s /home/zenic/node_start.sh' % (host_ip,),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
update_progress_to_db(
req, role_id_list, zenic_state['UPDATE_FAILED'])
LOG.info(_("Start zenic for %s failed!" % host_ip))
fp.write(e.output.strip())
else:
update_progress_to_db(
req, role_id_list, zenic_state['UPDATING'],
update_progress_percentage)
LOG.info(_("Start zenic for %s successfully!" % host_ip))
fp.write(exc_result)

View File

@ -1,7 +1,6 @@
import subprocess
import daisy.registry.client.v1.api as registry
from daisy.api.backends.tecs import config as role_service
from oslo_log import log as logging
import webob.exc
from webob.exc import HTTPBadRequest
@ -188,7 +187,7 @@ class config_clushshell():
if service['component_id'] not in components_id:
continue
services_name = role_service.service_map.get(service['name'])
services_name = daisy_cmn.service_map.get(service['name'])
if not services_name:
msg = "Can't find service for '%s'" % service
raise HTTPBadRequest(explanation=msg)
@ -275,7 +274,7 @@ class config_clushshell():
def _role_service_restart(self, role_info, host_ip):
""" """
for service in role_info['service_name']:
services_name = role_service.service_map.get(service)
services_name = daisy_cmn.service_map.get(service)
if not services_name:
msg = "Can't find service for '%s'" % service
raise HTTPBadRequest(explanation=msg)

View File

@ -35,7 +35,7 @@ from daisy.common import wsgi
import daisy.registry.client.v1.api as registry
from daisy.api.v1 import controller
from daisy.api.v1 import filters
import daisy.api.backends.tecs.common as tecs_cmn
import daisy.api.backends.common as daisy_cmn
LOG = logging.getLogger(__name__)
@ -169,7 +169,7 @@ class Controller(controller.BaseController):
'rm -rf {0}daisy_tmp'.format(BACK_PATH)
]
tecs_cmn.run_scrip(scripts, msg='Backup file failed!')
daisy_cmn.run_scrip(scripts, msg='Backup file failed!')
return {"backup_file": BACK_PATH + backup_file_name}
@utils.mutating
@ -191,7 +191,8 @@ class Controller(controller.BaseController):
BACK_PATH),
'rm -rf {0}daisy_tmp'.format(BACK_PATH)
]
tecs_cmn.run_scrip(restore_scripts, msg='Restore failed!')
daisy_cmn.run_scrip(restore_scripts, msg='Restore failed!')
LOG.info('Restore successfully')
@utils.mutating
@ -210,7 +211,7 @@ class Controller(controller.BaseController):
file_meta['backup_file_path'], BACK_PATH)
]
tecs_cmn.run_scrip(scripts, msg='Decompression file failed!')
daisy_cmn.run_scrip(scripts, msg='Decompression file failed!')
try:
version = subprocess.check_output(
@ -222,7 +223,7 @@ class Controller(controller.BaseController):
LOG.error(msg)
raise HTTPForbidden(explanation=msg, request=req,
content_type="text/plain")
tecs_cmn.run_scrip(['rm -rf {0}daisy_tmp'.format(BACK_PATH)])
daisy_cmn.run_scrip(['rm -rf {0}daisy_tmp'.format(BACK_PATH)])
return {"backup_file_version": version}
@utils.mutating

View File

@ -40,15 +40,11 @@ from daisy import notifier
import daisy.registry.client.v1.api as registry
from daisy.registry.api.v1 import template
import daisy.api.backends.tecs.common as tecs_cmn
try:
import simplejson as json
except ImportError:
import json
daisy_tecs_path = tecs_cmn.daisy_tecs_path
LOG = logging.getLogger(__name__)
_ = i18n._

View File

@ -40,7 +40,6 @@ from daisy import notifier
import daisy.registry.client.v1.api as registry
import threading
import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.tecs.common as tecs_cmn
import ConfigParser
import socket
import netaddr
@ -212,16 +211,6 @@ class Controller(controller.BaseController):
LOG.error(msg)
raise HTTPForbidden(msg)
def validate_mac_format(self, mac_str):
'''Validates a mac address'''
if re.match("[0-9a-f]{2}([-:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$",
mac_str.lower()):
return
else:
msg = (_("%s invalid mac format!") % mac_str)
LOG.error(msg)
raise HTTPForbidden(msg)
def get_cluster_networks_info(self, req, cluster_id=None, type=None):
'''
get_cluster_networks_info by cluster id
@ -627,10 +616,7 @@ class Controller(controller.BaseController):
"""
self._enforce(req, 'get_host')
host_meta = self.get_host_meta_or_404(req, id)
if host_meta.get("hwm_id"):
self.check_discover_state_with_hwm(req, host_meta)
else:
self.check_discover_state_with_no_hwm(req, host_meta)
self.check_discover_state_with_no_hwm(req, host_meta)
host_vcpu_pin = vcpu_pin.allocate_cpus(host_meta)
host_meta.update(host_vcpu_pin)
if 'role' in host_meta and 'CONTROLLER_HA' in host_meta['role']:
@ -640,7 +626,7 @@ class Controller(controller.BaseController):
cluster_id = cluster_info[0]['id']
ctl_ha_nodes_min_mac =\
tecs_cmn.get_ctl_ha_nodes_min_mac(req, cluster_id)
daisy_cmn.get_ctl_ha_nodes_min_mac(req, cluster_id)
sorted_ha_nodes = \
sorted(ctl_ha_nodes_min_mac.iteritems(), key=lambda d: d[1])
sorted_ha_nodes_min_mac = \
@ -657,9 +643,9 @@ class Controller(controller.BaseController):
role_id = role['id']
break
service_disks = \
tecs_cmn.get_service_disk_list(req,
{'filters': {
'role_id': role_id}})
daisy_cmn.get_service_disk_list(req,
{'filters': {
'role_id': role_id}})
db_share_cluster_disk = []
service_lun_info = []
for disk in service_disks:
@ -708,35 +694,11 @@ class Controller(controller.BaseController):
try:
nodes = registry.get_hosts_detail(req.context, **params)
for node in nodes:
if node.get("hwm_id"):
self.check_discover_state_with_hwm(req, node)
else:
self.check_discover_state_with_no_hwm(req, node)
self.check_discover_state_with_no_hwm(req, node)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return dict(nodes=nodes)
def check_discover_state_with_hwm(self, req, node):
node['discover_state'] = None
host_meta = self.get_host_meta_or_404(req, node.get('id'))
if host_meta and host_meta.get('interfaces'):
mac_list = [
interface['mac'] for interface in
host_meta.get('interfaces') if interface.get('mac')]
if mac_list:
min_mac = min(mac_list)
pxe_discover_host = self._get_discover_host_by_mac(req,
min_mac)
if pxe_discover_host:
if pxe_discover_host.get('ip'):
node['discover_state'] = \
"SSH:" + pxe_discover_host.get('status')
else:
node['discover_state'] = \
"PXE:" + pxe_discover_host.get('status')
return node
def check_discover_state_with_no_hwm(self, req, node):
node['discover_state'] = None
host_meta = self.get_host_meta_or_404(req, node.get('id'))
@ -752,59 +714,6 @@ class Controller(controller.BaseController):
return node
def _update_hwm_host(self, req, hwm_host, hosts, hwm_ip):
hwm_host_mac = [hwm_host_interface['mac'] for hwm_host_interface
in hwm_host.get('interfaces')]
for host in hosts:
host_update_meta = dict()
host_meta = self.get_host_meta_or_404(req, host['id'])
host_mac = [host_interface['mac'] for host_interface
in host_meta.get('interfaces')]
set_same_mac = set(hwm_host_mac) & set(host_mac)
if set_same_mac:
host_update_meta['hwm_id'] = hwm_host['id']
host_update_meta['hwm_ip'] = hwm_ip
node = registry.update_host_metadata(req.context, host['id'],
host_update_meta)
return node
host_add_meta = dict()
host_add_meta['name'] = str(hwm_host['id'])
host_add_meta['description'] = 'default'
host_add_meta['os_status'] = 'init'
host_add_meta['hwm_id'] = str(hwm_host['id'])
host_add_meta['hwm_ip'] = str(hwm_ip)
host_add_meta['interfaces'] = str(hwm_host['interfaces'])
node = registry.add_host_metadata(req.context, host_add_meta)
return node
def update_hwm_host(self, req, host_meta):
self._enforce(req, 'get_hosts')
params = self._get_query_params(req)
try:
hosts = registry.get_hosts_detail(req.context, **params)
hosts_without_hwm_id = list()
hosts_hwm_id_list = list()
for host in hosts:
if host.get('hwm_id'):
hosts_hwm_id_list.append(host['hwm_id'])
else:
hosts_without_hwm_id.append(host)
hwm_hosts = host_meta['nodes']
hwm_ip = host_meta['hwm_ip']
nodes = list()
for hwm_host in eval(hwm_hosts):
if hwm_host['id'] in hosts_hwm_id_list:
continue
node = self._update_hwm_host(req, hwm_host,
hosts_without_hwm_id, hwm_ip)
nodes.append(node)
return dict(nodes=nodes)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
def _compute_hugepage_memory(self, hugepages, memory, hugepagesize='1G'):
hugepage_memory = 0
if hugepagesize == '2M':
@ -1041,7 +950,7 @@ class Controller(controller.BaseController):
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
orig_mac_list = list()
if 'interfaces' in host_meta:
for interface_param in eval(host_meta['interfaces']):
if not interface_param.get('pci', None) and \
@ -1058,12 +967,9 @@ class Controller(controller.BaseController):
'vswitch_type']
raise HTTPBadRequest(explanation=msg, request=req,
content_type="text/plain")
interfaces_db = orig_host_meta.get('interfaces', None)
orig_mac_list = [interface_db['mac'] for interface_db in
interfaces_db if interface_db['mac']]
orig_pci_list = [interface_db['pci'] for interface_db in
interfaces_db if interface_db['pci']]
if interfaces_db and len(orig_pci_list):
if orig_host_meta.get('interfaces', None):
interfaces_db = orig_host_meta['interfaces']
interfaces_param = eval(host_meta['interfaces'])
interfaces_db_ether = [
interface_db for interface_db in interfaces_db if
@ -1625,17 +1531,6 @@ class Controller(controller.BaseController):
host_meta = registry.update_host_metadata(req.context, id,
host_meta)
if orig_mac_list:
orig_min_mac = min(orig_mac_list)
discover_host = self._get_discover_host_by_mac(req,
orig_min_mac)
if discover_host:
discover_host_params = {
"mac": orig_min_mac,
"status": "DISCOVERY_SUCCESSFUL"
}
self.update_pxe_host(req, discover_host['id'],
discover_host_params)
except exception.Invalid as e:
msg = (_("Failed to update host metadata. Got error: %s") %
utils.exception_to_str(e))
@ -2373,112 +2268,6 @@ class Controller(controller.BaseController):
return {'host_meta': host_meta}
def _get_discover_host_mac(self, req):
params = dict()
hosts_mac = list()
discover_hosts =\
registry.get_discover_hosts_detail(req.context, **params)
for host in discover_hosts:
if host.get('mac'):
hosts_mac.append(host['mac'])
return hosts_mac
def _get_discover_host_by_mac(self, req, host_mac):
params = dict()
discover_hosts = \
registry.get_discover_hosts_detail(req.context, **params)
LOG.info("%s" % discover_hosts)
for host in discover_hosts:
if host.get('mac') == host_mac:
return host
return
@utils.mutating
def add_pxe_host(self, req, host_meta):
"""
Adds a new pxe host to Daisy
:param req: The WSGI/Webob Request object
:param host_meta: Mapping of metadata about host
:raises HTTPBadRequest if x-host-name is missing
"""
self._enforce(req, 'add_pxe_host')
LOG.warn("host_meta: %s" % host_meta)
if not host_meta.get('mac'):
msg = "MAC parameter can not be None."
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
self.validate_mac_format(host_meta['mac'])
pxe_hosts_mac = self._get_discover_host_mac(req)
if host_meta['mac'] in pxe_hosts_mac:
host = self._get_discover_host_by_mac(req, host_meta['mac'])
host_meta = registry.update_discover_host_metadata(
req.context, host['id'], host_meta)
return {'host_meta': host_meta}
if not host_meta.get('status', None):
host_meta['status'] = 'None'
try:
pxe_host_info = \
registry.add_discover_host_metadata(req.context, host_meta)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return {'host_meta': pxe_host_info}
@utils.mutating
def update_pxe_host(self, req, id, host_meta):
"""
Update an existing pxe host in Daisy
"""
self._enforce(req, 'update_pxe_host')
if not host_meta.get('mac'):
msg = "MAC parameter can not be None."
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
self.validate_mac_format(host_meta['mac'])
orig_host_meta = registry.get_discover_host_metadata(req.context, id)
try:
if host_meta['mac'] == orig_host_meta['mac']:
host_meta = registry.update_discover_host_metadata(
req.context, id, host_meta)
except exception.Invalid as e:
msg = (_("Failed to update discover host metadata. "
"Got error: %s") % utils.exception_to_str(e))
LOG.error(msg)
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
except exception.NotFound as e:
msg = (_("Failed to find discover host to update: %s") %
utils.exception_to_str(e))
LOG.error(msg)
raise HTTPNotFound(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden to update discover host: %s") %
utils.exception_to_str(e))
LOG.error(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except (exception.Conflict, exception.Duplicate) as e:
LOG.error(utils.exception_to_str(e))
raise HTTPConflict(body=_('Host operation conflicts'),
request=req,
content_type='text/plain')
else:
self.notifier.info('host.update', host_meta)
return {'host_meta': host_meta}
class HostDeserializer(wsgi.JSONRequestDeserializer):
"""Handles deserialization of specific controller method requests."""
@ -2497,9 +2286,6 @@ class HostDeserializer(wsgi.JSONRequestDeserializer):
def discover_host(self, request):
return self._deserialize(request)
def update_hwm_host(self, request):
return self._deserialize(request)
def add_discover_host(self, request):
return self._deserialize(request)

View File

@ -1,347 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/Hwm endpoint for Daisy v1 API
"""
from oslo_config import cfg
from oslo_log import log as logging
import webob.exc
from webob.exc import HTTPBadRequest
from webob.exc import HTTPConflict
from webob.exc import HTTPForbidden
from webob.exc import HTTPNotFound
from webob import Response
from daisy.api import policy
import daisy.api.v1
from daisy.api.v1 import controller
from daisy.api.v1 import filters
from daisy.common import exception
from daisy.common import property_utils
from daisy.common import utils
from daisy.common import wsgi
from daisy import i18n
from daisy import notifier
import daisy.registry.client.v1.api as registry
from daisy.registry.api.v1 import hwms
import daisy.api.backends.tecs.common as tecs_cmn
daisy_tecs_path = tecs_cmn.daisy_tecs_path
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
SUPPORTED_PARAMS = hwms.SUPPORTED_PARAMS
SUPPORTED_FILTERS = hwms.SUPPORTED_FILTERS
ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE
CONF = cfg.CONF
CONF.import_opt('disk_formats', 'daisy.common.config',
group='image_format')
CONF.import_opt('container_formats', 'daisy.common.config',
group='image_format')
CONF.import_opt('image_property_quota', 'daisy.common.config')
class Controller(controller.BaseController):
"""
WSGI controller for hwms resource in Daisy v1 API
The hwms resource API is a RESTful web service for hwm data.
The API is as follows::
GET /hwm -- Returns a list of registered hwms
GET /hwm/<ID> -- Returns detailed metadata about the hwm with id <ID>
POST /hwm -- Registers a new hwm and returns its metadata
PUT /hwm/<ID> -- Updates metadata for the hwm with id <ID>
DELETE /hwm/<ID> -- Deletes the hwm with id <ID>
"""
def __init__(self):
self.notifier = notifier.Notifier()
registry.configure_registry_client()
self.policy = policy.Enforcer()
if property_utils.is_property_protection_enabled():
self.prop_enforcer = property_utils.PropertyRules(self.policy)
else:
self.prop_enforcer = None
def _enforce(self, req, action, target=None):
"""Authorize an action against our policies"""
if target is None:
target = {}
try:
self.policy.enforce(req.context, action, target)
except exception.Forbidden:
raise HTTPForbidden()
def _get_filters(self, req):
"""
Return a dictionary of query param filters from the request
:param req: the Request object coming from the wsgi layer
:retval a dict of key/value filters
"""
query_filters = {}
for param in req.params:
if param in SUPPORTED_FILTERS:
query_filters[param] = req.params.get(param)
if not filters.validate(param, query_filters[param]):
raise HTTPBadRequest(_('Bad value passed to filter '
'%(filter)s, got %(val)s')
% {'filter': param,
'val': query_filters[param]})
return query_filters
def _get_query_params(self, req):
"""
Extracts necessary query params from request.
:param req: the WSGI Request object
:retval dict of parameters that can be used by registry client
"""
params = {'filters': self._get_filters(req)}
for PARAM in SUPPORTED_PARAMS:
if PARAM in req.params:
params[PARAM] = req.params.get(PARAM)
return params
def _raise_404_if_cluster_deleted(self, req, cluster_id):
cluster = self.get_cluster_meta_or_404(req, cluster_id)
if cluster['deleted']:
msg = _("Cluster with identifier %s has been deleted.") % \
cluster_id
raise webob.exc.HTTPNotFound(msg)
def get_clusters_hwm_ip(self, req):
params = self._get_query_params(req)
clusters_hwm_ip = list()
clusters = registry.get_clusters_detail(req.context, **params)
for cluster in clusters:
clusters_hwm_ip.append(cluster.get('hwm_ip'))
return clusters_hwm_ip
@utils.mutating
def add_hwm(self, req, hwm):
"""
Adds a new hwm to Daisy.
:param req: The WSGI/Webob Request object
:param hwm: Mapping of metadata about the hwm
"""
self._enforce(req, 'add_hwm')
hwm = registry.add_hwm_metadata(req.context, hwm)
return {'hwm': hwm}
@utils.mutating
def update_hwm(self, req, id, hwm):
"""
Updates an existing hwm with the registry.
:param request: The WSGI/Webob Request object
:param id: The opaque hwm identifier
:retval Returns the updated hwm information as a mapping
"""
self._enforce(req, 'update_hwm')
hwm_meta = registry.hwm_detail_metadata(req.context, id)
hwm_ip = hwm_meta['hwm_ip']
clusters_hwm_ip = self.get_clusters_hwm_ip(req)
if hwm_ip in clusters_hwm_ip:
msg = (_("Hwm %s has already used in cluster, "
"it can not be update. " % hwm_ip))
LOG.error(msg)
raise HTTPForbidden(explanation=msg, request=req,
content_type="text/plain")
try:
hwm = registry.update_hwm_metadata(req.context, id, hwm)
except exception.Invalid as e:
msg = (_("Failed to update hwm metadata. Got error: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
except exception.NotFound as e:
msg = (_("Failed to find hwm to update: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPNotFound(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden to update hwm: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except (exception.Conflict, exception.Duplicate) as e:
LOG.warn(utils.exception_to_str(e))
raise HTTPConflict(body=_('hwm operation conflicts'),
request=req,
content_type='text/plain')
else:
self.notifier.info('hwm.update', hwm)
return {'hwm': hwm}
@utils.mutating
def delete_hwm(self, req, id):
"""
Delete an existing hwm from the registry.
:param request: The WSGI/Webob Request object
:param id: The opaque hwm identifier
:retval Returns an empty response on success
"""
self._enforce(req, 'delete_hwm')
hwm_meta = registry.hwm_detail_metadata(req.context, id)
hwm_ip = hwm_meta['hwm_ip']
clusters_hwm_ip = self.get_clusters_hwm_ip(req)
if hwm_ip in clusters_hwm_ip:
msg = (_("Hwm %s has already used in cluster, "
"it can not be deleted. " % hwm_ip))
LOG.error(msg)
raise HTTPForbidden(explanation=msg, request=req,
content_type="text/plain")
try:
registry.delete_hwm_metadata(req.context, id)
except exception.NotFound as e:
msg = (_("Failed to find hwm to delete: %s") %
utils.exception_to_str(e))
LOG.error(msg)
raise HTTPNotFound(explanation=msg, request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden to delete hwm: %s") %
utils.exception_to_str(e))
LOG.error(msg)
raise HTTPForbidden(explanation=msg, request=req,
content_type="text/plain")
except exception.InUseByStore as e:
msg = (_(
"hwm %(id)s could not be deleted because it is in "
"use:%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)})
LOG.error(msg)
raise HTTPConflict(explanation=msg, request=req,
content_type="text/plain")
else:
return Response(body='', status=200)
@utils.mutating
def detail(self, req, id):
"""
Get detailed metadata for an existing hwm from the registry.
:param request: The WSGI/Webob Request object
:param id: The opaque hwm identifier
:retval Returns the hwm metadata as a mapping
"""
self._enforce(req, 'detail')
context = req.context
try:
hwm_meta = registry.hwm_detail_metadata(context, id)
except exception.NotFound:
msg = "Hwm with identifier %s not found" % id
LOG.debug(msg)
raise webob.exc.HTTPNotFound(
msg, request=req, content_type='text/plain')
except exception.Forbidden:
msg = "Forbidden hwm access"
LOG.debug(msg)
raise webob.exc.HTTPForbidden(msg,
request=req,
content_type='text/plain')
return {'hwm': hwm_meta}
@utils.mutating
def list(self, req):
self._enforce(req, 'list')
params = self._get_query_params(req)
try:
hwm_list = registry.hwm_list_metadata(req.context, **params)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return dict(hwm=hwm_list)
class HwmDeserializer(wsgi.JSONRequestDeserializer):
"""Handles deserialization of specific controller method requests."""
def _deserialize(self, request):
result = {}
result["hwm"] = utils.get_hwm_meta(request)
return result
def add_hwm(self, request):
return self._deserialize(request)
def update_hwm(self, request):
return self._deserialize(request)
class HwmSerializer(wsgi.JSONResponseSerializer):
"""Handles serialization of specific controller method responses."""
def __init__(self):
self.notifier = notifier.Notifier()
def add_hwm(self, response, result):
hwm = result['hwm']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(hwm=hwm))
return response
def delete_hwm(self, response, result):
hwm = result['hwm']
response.status = 200
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(hwm=hwm))
return response
def get_detail(self, response, result):
hwm = result['hwm']
response.status = 200
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(hwm=hwm))
return response
def update_hwm(self, response, result):
hwm = result['hwm']
response.status = 200
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(hwm=hwm))
return response
def create_resource():
"""Templates resource factory method"""
deserializer = HwmDeserializer()
serializer = HwmSerializer()
return wsgi.Resource(Controller(), deserializer, serializer)
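Taken together with the /v1 routes removed in the router hunk below, the controller above served a small CRUD API. A sketch of how it could be exercised with python-requests; the endpoint address and port are assumptions:

import json

import requests

BASE = 'http://127.0.0.1:19292/v1'  # host and port are assumptions

body = {'hwm': {'hwm_ip': '10.0.0.8', 'description': 'rack-1 hwm'}}
resp = requests.post(BASE + '/hwm', data=json.dumps(body),
                     headers={'Content-Type': 'application/json'})
hwm_id = resp.json()['hwm']['id']
print(requests.get(BASE + '/hwm').json())               # list
print(requests.get(BASE + '/hwm/%s' % hwm_id).json())   # detail
requests.delete(BASE + '/hwm/%s' % hwm_id)              # delete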


@ -29,7 +29,6 @@ from daisy.api.v1 import networks
from daisy.api.v1 import install
from daisy.api.v1 import disk_array
from daisy.api.v1 import host_template
from daisy.api.v1 import hwms
from daisy.common import wsgi
from daisy.api.v1 import backup_restore
@ -153,11 +152,6 @@ class API(wsgi.Router):
action='get_host',
conditions={'method': ['GET']})
mapper.connect("/hwm_nodes",
controller=hosts_resource,
action='update_hwm_host',
conditions={'method': ['POST']})
mapper.connect("/discover_host/",
controller=hosts_resource,
action='discover_host',
@ -198,30 +192,6 @@ class API(wsgi.Router):
action='update_pxe_host',
conditions={'method': ['PUT']})
hwms_resource = hwms.create_resource()
mapper.connect("/hwm",
controller=hwms_resource,
action='add_hwm',
conditions={'method': ['POST']})
mapper.connect("/hwm/{id}",
controller=hwms_resource,
action='delete_hwm',
conditions={'method': ['DELETE']})
mapper.connect("/hwm/{id}",
controller=hwms_resource,
action='update_hwm',
conditions={'method': ['PUT']})
mapper.connect("/hwm",
controller=hwms_resource,
action='list',
conditions={'method': ['GET']})
mapper.connect("/hwm/{id}",
controller=hwms_resource,
action='detail',
conditions={'method': ['GET']})
clusters_resource = clusters.create_resource()
mapper.connect("/clusters",


@ -40,10 +40,10 @@ from daisy import notifier
import daisy.registry.client.v1.api as registry
from daisy.registry.api.v1 import template
import daisy.api.backends.tecs.common as tecs_cmn
import daisy.api.backends.common as daisy_cmn
daisy_tecs_path = tecs_cmn.daisy_tecs_path
# TODO(huzhj): move this into a common submodule
daisy_path = '/var/lib/daisy/'
LOG = logging.getLogger(__name__)
@ -396,7 +396,7 @@ class Controller(controller.BaseController):
template_detail['hosts'] = json.loads(
template_detail['hosts'])
tecs_json = daisy_tecs_path + "%s.json" % template_name
tecs_json = daisy_path + "%s.json" % template_name
cmd = 'rm -rf %s' % (tecs_json,)
daisy_cmn.subprocess_call(cmd)
with open(tecs_json, "w+") as fp:
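The hunk above only repoints the generated template JSON from daisy_tecs_path to the new daisy_path before deleting any stale copy with rm -rf and rewriting it. The same step can be done without shelling out; a minimal sketch under those assumptions:

import json
import os

def write_template_json(template_name, template_detail,
                        base_path='/var/lib/daisy/'):
    # Replace any stale copy of the template JSON with the current detail.
    path = os.path.join(base_path, '%s.json' % template_name)
    if os.path.exists(path):
        os.remove(path)
    with open(path, 'w+') as fp:
        fp.write(json.dumps(template_detail))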


@ -348,13 +348,6 @@ def get_host_meta(response):
return result
def get_hwm_meta(response):
result = {}
for key, value in response.json.items():
result[key] = value
return result
def get_cluster_meta(response):
result = {}
for key, value in response.json.items():


@ -6068,127 +6068,6 @@ def cinder_volume_list(context, filters=None, **param):
cinder_volume_ref = _cinder_volume_get(context, role_id=role_id)
return cinder_volume_ref
@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500,
stop_max_attempt_number=50)
def hwm_add(context, values):
"""add hwm to daisy."""
return _hwm_update(context, values, None)
@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500,
stop_max_attempt_number=50)
def hwm_update(context, hwm_id, values):
"""update cluster template to daisy."""
return _hwm_update(context, values, hwm_id)
def _hwm_update(context, values, hwm_id):
"""update or add hwm to daisy."""
values = values.copy()
session = get_session()
with session.begin():
if hwm_id:
hwm_ref = _hwm_get(context, hwm_id, session=session)
else:
hwm_ref = models.Hwm()
if hwm_id:
# Don't drop created_at if we're passing it in...
_drop_protected_attrs(models.Hwm, values)
# NOTE(iccha-sethi): updated_at must be explicitly set in case
# only a related property table was modified
values['updated_at'] = timeutils.utcnow()
if hwm_id:
if values.get('id', None): del values['id']
hwm_ref.update(values)
_update_values(hwm_ref, values)
try:
hwm_ref.save(session=session)
except db_exception.DBDuplicateEntry:
raise exception.Duplicate("Node ID %s already exists!"
% values['id'])
else:
hwm_ref.update(values)
_update_values(hwm_ref, values)
try:
hwm_ref.save(session=session)
except db_exception.DBDuplicateEntry:
raise exception.Duplicate("Node ID %s already exists!"
% values['id'])
return hwm_get(context, hwm_ref.id)
def hwm_destroy(context, hwm_id, session=None, force_show_deleted=False):
session = session or get_session()
with session.begin():
hwm_ref = _hwm_get(context, hwm_id, session=session)
hwm_ref.delete(session=session)
return hwm_ref
def _hwm_get(context, hwm_id, session=None, force_show_deleted=False):
"""Get an hwm or raise if it does not exist."""
session = session or get_session()
try:
query = session.query(models.Hwm).filter_by(id=hwm_id)
# filter out deleted images if context disallows it
if not force_show_deleted and not context.can_see_deleted:
query = query.filter_by(deleted=False)
hwm = query.one()
return hwm
except sa_orm.exc.NoResultFound:
msg = "No hwm found with ID %s" % hwm_id
LOG.debug(msg)
raise exception.NotFound(msg)
def hwm_get(context, hwm_id, session=None, force_show_deleted=False):
hwm = _hwm_get(context, hwm_id, session=session,
force_show_deleted=force_show_deleted)
return hwm
def hwm_get_all(context, filters=None, marker=None, limit=None, sort_key=None,
sort_dir=None):
sort_key = ['created_at'] if not sort_key else sort_key
default_sort_dir = 'desc'
if not sort_dir:
sort_dir = [default_sort_dir] * len(sort_key)
elif len(sort_dir) == 1:
default_sort_dir = sort_dir[0]
sort_dir *= len(sort_key)
filters = filters or {}
showing_deleted = 'changes-since' in filters or filters.get('deleted',
False)
marker_hwm = None
if marker is not None:
marker_hwm = _hwm_get(context, marker,
force_show_deleted=showing_deleted)
for key in ['created_at', 'id']:
if key not in sort_key:
sort_key.append(key)
sort_dir.append(default_sort_dir)
session = get_session()
query = session.query(models.Hwm).filter_by(deleted=showing_deleted)
query = _paginate_query(query, models.Hwm, limit, sort_key,
marker=marker_hwm,
sort_dir=None,
sort_dirs=sort_dir)
hwms = []
for hwm in query.all():
hwm = hwm.to_dict()
hwms.append(hwm)
return hwms
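hwm_get_all pages on a marker id plus a limit and a stable created_at/id sort. A short sketch of how a caller would have walked every row under that contract (the page size is an arbitrary choice):

def iter_all_hwms(context, page_size=50):
    # Walk hwm_get_all page by page, advancing the marker to the
    # last id seen until the registry returns a short page.
    marker = None
    while True:
        page = hwm_get_all(context, limit=page_size, marker=marker)
        if not page:
            return
        for row in page:
            yield row
        if len(page) < page_size:
            return
        marker = page[-1]['id']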
@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500,
stop_max_attempt_number=50)
def template_add(context, values):


@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
# Note: hwm (hardware management) table is currently not used. This is just
# a placeholder.
from sqlalchemy import MetaData, Table, Column, String
from daisy.db.sqlalchemy.migrate_repo.schema import (Boolean, DateTime, Text,
create_tables)
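For orientation, the placeholder table the comment above refers to amounts to roughly the following, reconstructed from the Hwm model later in this commit and using the helpers imported above (the exact migration columns are an assumption):

def define_hwm_table(meta):
    # Mirrors models.Hwm: a uuid primary key plus hwm_ip/description,
    # with the usual soft-delete bookkeeping columns.
    return Table('hwm', meta,
                 Column('id', String(36), primary_key=True, nullable=False),
                 Column('hwm_ip', String(36), nullable=True),
                 Column('description', Text()),
                 Column('created_at', DateTime(), nullable=False),
                 Column('updated_at', DateTime()),
                 Column('deleted_at', DateTime()),
                 Column('deleted', Boolean(), default=False),
                 mysql_engine='InnoDB',
                 extend_existing=True)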


@ -116,16 +116,6 @@ class DaisyBase(models.ModelBase, models.TimestampMixin):
return d
class Hwm(BASE, DaisyBase):
"""Represents an hwm in the datastore."""
__tablename__ = 'hwm'
__table_args__ = (Index('ix_hwm_deleted', 'deleted'),)
description = Column(Text)
hwm_ip = Column(String(36), nullable=True)
class Host(BASE, DaisyBase):
"""Represents an host in the datastore."""
@ -152,8 +142,6 @@ class Host(BASE, DaisyBase):
messages = Column(Text)
hugepagesize = Column(String(36))
hugepages = Column(Integer(), default=0)
hwm_id = Column(String(36))
hwm_ip = Column(String(256))
vcpu_pin_set = Column(String(255))
dvs_high_cpuset = Column(String(255))
pci_high_cpuset = Column(String(255))
@ -205,7 +193,6 @@ class Cluster(BASE, DaisyBase):
segmentation_type = Column(String(64))
auto_scale = Column(Integer(), nullable=False, default=0)
use_dns = Column(Integer(), nullable=False, default=0)
hwm_ip = Column(String(256))
class ClusterHost(BASE, DaisyBase):
@ -620,7 +607,7 @@ class CinderVolume(BASE, DaisyBase):
def register_models(engine):
"""Create database tables for all models with the given engine."""
models = (Hwm, Host, DiscoverHost, Cluster, ClusterHost, Template,
models = (Host, DiscoverHost, Cluster, ClusterHost, Template,
HostTemplate, HostInterface, Network, IpRange, HostRole,
Role, ServiceRole, Service, Component, ConfigSet, Config,
ConfigFile, ConfigSetItem, ConfigHistory, Task, TaskInfo,
@ -633,7 +620,7 @@ def register_models(engine):
def unregister_models(engine):
"""Drop database tables for all models with the given engine."""
models = (Hwm, Host, DiscoverHost, Cluster, ClusterHost, Template,
models = (Host, DiscoverHost, Cluster, ClusterHost, Template,
HostTemplate, HostInterface, Network, IpRange, HostRole,
Role, ServiceRole, Service, Component, ConfigSet, Config,
ConfigFile, ConfigSetItem, ConfigHistory, Task, TaskInfo,


@ -23,7 +23,6 @@ from daisy.registry.api.v1 import configs
from daisy.registry.api.v1 import networks
from daisy.registry.api.v1 import disk_array
from daisy.registry.api.v1 import template
from daisy.registry.api.v1 import hwms
def init(mapper):
@ -52,33 +51,6 @@ def init(mapper):
action="get_host_clusters",
conditions={'method': ['GET']})
hwms_resource = hwms.create_resource()
mapper.connect("/hwm",
controller=hwms_resource,
action="add_hwm",
conditions={'method': ['POST']})
mapper.connect("/hwm/{id}",
controller=hwms_resource,
action="delete_hwm",
conditions={'method': ['DELETE']})
mapper.connect("/hwm/{id}",
controller=hwms_resource,
action="update_hwm",
conditions={'method': ['PUT']})
mapper.connect("/hwm",
controller=hwms_resource,
action="hwm_list",
conditions={'method': ['GET']})
mapper.connect("/hwm/{id}",
controller=hwms_resource,
action="detail",
conditions=dict(method=["GET"]))
hosts_resource = hosts.create_resource()
mapper.connect("/nodes",


@ -30,10 +30,6 @@ from daisy.common import wsgi
import daisy.db
from daisy import i18n
from daisyclient import client as daisy_client
from daisy.registry.api.v1 import hwms as registry_hwm
import ConfigParser
reload(sys)
sys.setdefaultencoding('utf-8')
@ -67,19 +63,6 @@ class Controller(object):
def __init__(self):
self.db_api = daisy.db.get_api()
self.daisyclient = self.get_daisyclient()
@staticmethod
def get_daisyclient():
"""Get Daisy client instance."""
config_daisy = ConfigParser.ConfigParser()
config_daisy.read("/etc/daisy/daisy-api.conf")
daisy_port = config_daisy.get("DEFAULT", "bind_port")
args = {
'version': 1.0,
'endpoint': 'http://127.0.0.1:' + daisy_port
}
return daisy_client.Client(**args)
def _get_hosts(self, context, filters, **params):
"""Get hosts, wrapping in exception if necessary."""
@ -403,17 +386,10 @@ class Controller(object):
except Exception:
LOG.exception(_LE("Unable to show host %s") % id)
raise
param = dict()
param['hwm_ip'] = host_data.hwm_ip
param['hwm_id'] = host_data.hwm_id
controller = registry_hwm.Controller()
hwms = controller.hwm_list(req)
hwms_ip = [hwm['hwm_ip'] for hwm in hwms]
if param['hwm_ip'] in hwms_ip:
result = self.daisyclient.node.location(**param)
location = str(result.rack) + '/' + str(result.position)
else:
location = ""
# Currently not used
location = ""
host_interface = self.db_api.get_host_interface(req.context, id)
role_name = []


@ -1,355 +0,0 @@
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Reference implementation registry server WSGI controller
"""
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import strutils
from oslo_utils import timeutils
from webob import exc
from daisy.common import exception
from daisy.common import utils
from daisy.common import wsgi
import daisy.db
from daisy import i18n
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
CONF = cfg.CONF
DISPLAY_FIELDS_IN_INDEX = ['id', 'description']
SUPPORTED_FILTERS = ['name', 'description']
SUPPORTED_SORT_KEYS = ('name', 'description', 'id', 'created_at', 'updated_at')
SUPPORTED_SORT_DIRS = ('asc', 'desc')
SUPPORTED_PARAMS = ('limit', 'marker', 'sort_key', 'sort_dir', 'name',
'description')
class Controller(object):
def __init__(self):
self.db_api = daisy.db.get_api()
def _get_query_params(self, req):
"""Extract necessary query parameters from http request.
:param req: the Request object coming from the wsgi layer
:retval dictionary of filters to apply to list of templates
"""
params = {
'filters': self._get_filters(req),
'limit': self._get_limit(req),
'sort_key': [self._get_sort_key(req)],
'sort_dir': [self._get_sort_dir(req)],
'marker': self._get_marker(req),
}
for key, value in params.items():
if value is None:
del params[key]
return params
def _get_filters(self, req):
"""Return a dictionary of query param filters from the request
:param req: the Request object coming from the wsgi layer
:retval a dict of key/value filters
"""
filters = {}
properties = {}
for param in req.params:
if param in SUPPORTED_FILTERS:
filters[param] = req.params.get(param)
if param.startswith('property-'):
_param = param[9:]
properties[_param] = req.params.get(param)
if 'changes-since' in filters:
isotime = filters['changes-since']
try:
filters['changes-since'] = timeutils.parse_isotime(isotime)
except ValueError:
raise exc.HTTPBadRequest(_("Unrecognized changes-since value"))
if 'protected' in filters:
value = self._get_bool(filters['protected'])
if value is None:
raise exc.HTTPBadRequest(_("protected must be True, or "
"False"))
filters['protected'] = value
# only allow admins to filter on 'deleted'
if req.context.is_admin:
deleted_filter = self._parse_deleted_filter(req)
if deleted_filter is not None:
filters['deleted'] = deleted_filter
elif 'changes-since' not in filters:
filters['deleted'] = False
elif 'changes-since' not in filters:
filters['deleted'] = False
if properties:
filters['properties'] = properties
return filters
def _get_limit(self, req):
"""Parse a limit query param into something usable."""
try:
limit = int(req.params.get('limit', CONF.limit_param_default))
except ValueError:
raise exc.HTTPBadRequest(_("limit param must be an integer"))
if limit < 0:
raise exc.HTTPBadRequest(_("limit param must be positive"))
return min(CONF.api_limit_max, limit)
def _get_marker(self, req):
"""Parse a marker query param into something usable."""
marker = req.params.get('marker', None)
if marker and not utils.is_uuid_like(marker):
msg = _('Invalid marker format')
raise exc.HTTPBadRequest(explanation=msg)
return marker
def _get_sort_key(self, req):
"""Parse a sort key query param from the request object."""
sort_key = req.params.get('sort_key', 'created_at')
if sort_key is not None and sort_key not in SUPPORTED_SORT_KEYS:
_keys = ', '.join(SUPPORTED_SORT_KEYS)
msg = _("Unsupported sort_key. Acceptable values: %s") % (_keys,)
raise exc.HTTPBadRequest(explanation=msg)
return sort_key
def _get_sort_dir(self, req):
"""Parse a sort direction query param from the request object."""
sort_dir = req.params.get('sort_dir', 'desc')
if sort_dir is not None and sort_dir not in SUPPORTED_SORT_DIRS:
_keys = ', '.join(SUPPORTED_SORT_DIRS)
msg = _("Unsupported sort_dir. Acceptable values: %s") % (_keys,)
raise exc.HTTPBadRequest(explanation=msg)
return sort_dir
def _get_bool(self, value):
value = value.lower()
if value == 'true' or value == '1':
return True
elif value == 'false' or value == '0':
return False
return None
def _parse_deleted_filter(self, req):
"""Parse deleted into something usable."""
deleted = req.params.get('deleted')
if deleted is None:
return None
return strutils.bool_from_string(deleted)
@utils.mutating
def add_hwm(self, req, body):
"""Registers a new hwm with the registry.
:param req: wsgi Request object
:param body: Dictionary of information about the hwm
:retval Returns the newly-created hwm information as a mapping,
which will include the newly-created hwm's internal id
in the 'id' field
"""
hwm_data = body["hwm"]
id = hwm_data.get('id')
if id and not utils.is_uuid_like(id):
msg = _LI("Rejecting hwm creation request for invalid hwm "
"id '%(bad_id)s'") % {'bad_id': id}
LOG.info(msg)
msg = _("Invalid hwm id format")
return exc.HTTPBadRequest(explanation=msg)
try:
hwm_data = self.db_api.hwm_add(req.context, hwm_data)
msg = (_LI("Successfully created hwm %s") %
hwm_data["id"])
LOG.info(msg)
if 'hwm' not in hwm_data:
hwm_data = dict(hwm=hwm_data)
return hwm_data
except exception.Duplicate:
msg = _("hwm with identifier %s already exists!") % id
LOG.warn(msg)
return exc.HTTPConflict(msg)
except exception.Invalid as e:
msg = (_("Failed to add hwm metadata. "
"Got error: %s") % utils.exception_to_str(e))
LOG.error(msg)
return exc.HTTPBadRequest(msg)
except Exception:
LOG.exception(_LE("Unable to create hwm %s"), id)
raise
@utils.mutating
def update_hwm(self, req, id, body):
"""Registers a new hwm with the registry.
:param req: wsgi Request object
:param body: Dictionary of information about the template
:retval Returns the newly-created template information as a mapping,
which will include the newly-created template's internal id
in the 'id' field
"""
hwm_data = body["hwm"]
if id and not utils.is_uuid_like(id):
msg = _LI("Rejecting cluster hwm creation request for invalid "
"hwm id '%(bad_id)s'") % {'bad_id': id}
LOG.info(msg)
msg = _("Invalid hwm id format")
return exc.HTTPBadRequest(explanation=msg)
try:
hwm_data = self.db_api.hwm_update(req.context, id, hwm_data)
msg = (_LI("Successfully updated hwm %s") %
hwm_data["id"])
LOG.info(msg)
if 'hwm' not in hwm_data:
hwm_data = dict(hwm=hwm_data)
return hwm_data
except exception.Duplicate:
msg = _("hwm with identifier %s already exists!") % id
LOG.warn(msg)
return exc.HTTPConflict(msg)
except exception.Invalid as e:
msg = (_("Failed to update hwm metadata.Got error: %s") %
utils.exception_to_str(e))
LOG.error(msg)
return exc.HTTPBadRequest(msg)
except Exception:
LOG.exception(_LE("Unable to update hwm %s"), id)
raise
@utils.mutating
def delete_hwm(self, req, id):
"""Registers a new hwm with the registry.
:param req: wsgi Request object
:param body: Dictionary of information about the template
:retval Returns the newly-created template information as a mapping,
which will include the newly-created template's internal id
in the 'id' field
"""
if id and not utils.is_uuid_like(id):
msg = _LI("Rejecting hwm delete request for invalid hwm "
"id '%(bad_id)s'") % {'bad_id': id}
LOG.info(msg)
msg = _("Invalid hwm id format")
return exc.HTTPBadRequest(explanation=msg)
try:
hwm_data = self.db_api.hwm_destroy(req.context, id)
msg = (_LI("Successfully deleted hwm %s") % id)
LOG.info(msg)
if 'hwm' not in hwm_data:
hwm_data = dict(hwm=hwm_data)
return hwm_data
except exception.Invalid as e:
msg = (_("Failed to delete hwm metadata. "
"Got error: %s") % utils.exception_to_str(e))
LOG.error(msg)
return exc.HTTPBadRequest(msg)
except Exception:
LOG.exception(_LE("Unable to delete hwm %s"), id)
raise
@utils.mutating
def hwm_list(self, req):
params = self._get_query_params(req)
try:
filters = params.pop('filters')
marker = params.get('marker')
limit = params.get('limit')
sort_key = params.get('sort_key')
sort_dir = params.get('sort_dir')
return self.db_api.hwm_get_all(
req.context, filters=filters, marker=marker, limit=limit,
sort_key=sort_key, sort_dir=sort_dir)
except exception.NotFound:
LOG.warn(_LW("Invalid marker. hwm %(id)s could not be "
"found.") % {'id': params.get('marker')})
msg = _("Invalid marker. hwm could not be found.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.Forbidden:
LOG.warn(_LW("Access denied to hwm %(id)s but returning "
"'not found'") % {'id': params.get('marker')})
msg = _("Invalid marker. hwm could not be found.")
raise exc.HTTPBadRequest(explanation=msg)
except Exception:
LOG.exception(_LE("Unable to list hwm"))
raise
@utils.mutating
def detail(self, req, id):
"""Registers a new hwm with the registry.
:param req: wsgi Request object
:param body: Dictionary of information about the template
:retval Returns the newly-created template information as a mapping,
which will include the newly-created template's internal id
in the 'id' field
"""
if id and not utils.is_uuid_like(id):
msg = _LI("Rejecting hwm delete request for invalid hwm "
"id '%(bad_id)s'") % {'bad_id': id}
LOG.info(msg)
msg = _("Invalid hwm id format")
return exc.HTTPBadRequest(explanation=msg)
try:
hwm_data = self.db_api.hwm_get(req.context, id)
msg = (_LI("Successfully get hwm information:%s") % id)
LOG.info(msg)
if 'hwm' not in hwm_data:
hwm_data = dict(hwm=hwm_data)
return hwm_data
except exception.Invalid as e:
msg = (_("Failed to get hwm metadata. Got error: %s") %
utils.exception_to_str(e))
LOG.error(msg)
return exc.HTTPBadRequest(msg)
except Exception:
LOG.exception(_LE("Unable to get hwm %s"), id)
raise
def create_resource():
"""Hwms resource factory method."""
deserializer = wsgi.JSONRequestDeserializer()
serializer = wsgi.JSONResponseSerializer()
return wsgi.Resource(Controller(), deserializer, serializer)


@ -328,31 +328,6 @@ def add_component_metadata(context, component_meta):
return c.add_component(component_meta)
def add_hwm_metadata(context, hwm):
c = get_registry_client(context)
return c.add_hwm(hwm)
def update_hwm_metadata(context, hwm_id, hwm):
c = get_registry_client(context)
return c.update_hwm(hwm_id, hwm)
def delete_hwm_metadata(context, hwm_id):
c = get_registry_client(context)
return c.delete_hwm(hwm_id)
def hwm_list_metadata(context, **kwargs):
c = get_registry_client(context)
return c.list_hwm(**kwargs)
def hwm_detail_metadata(context, hwm_id):
c = get_registry_client(context)
return c.get_hwm_detail(hwm_id)
def add_template_metadata(context, template):
c = get_registry_client(context)
return c.add_template(template)


@ -32,7 +32,6 @@ from daisy.registry.api.v1 import config_sets
from daisy.registry.api.v1 import configs
from daisy.registry.api.v1 import networks
from daisy.registry.api.v1 import template
from daisy.registry.api.v1 import hwms
LOG = logging.getLogger(__name__)
_LE = i18n._LE
@ -572,58 +571,6 @@ class RegistryClient(client.BaseClient):
data = jsonutils.loads(res.read())
return data['template']
def add_hwm(self, hwm):
""" """
headers = {
'Content-Type': 'application/json',
}
if 'hwm' not in hwm:
hwm = dict(hwm=hwm)
body = jsonutils.dumps(hwm)
res = self.do_request("POST", "/hwm", body=body, headers=headers)
# Registry returns a JSONified dict(hwm=hwm_info)
data = jsonutils.loads(res.read())
return data['hwm']
def update_hwm(self, hwm_id, hwm):
headers = {
'Content-Type': 'application/json',
}
if 'hwm' not in hwm:
hwm = dict(hwm=hwm)
body = jsonutils.dumps(hwm)
res = self.do_request(
"PUT",
"/hwm/%s" %
hwm_id,
body=body,
headers=headers)
# Registry returns a JSONified dict(hwm=hwm_info)
data = jsonutils.loads(res.read())
return data['hwm']
def delete_hwm(self, hwm_id):
res = self.do_request("DELETE", "/hwm/%s" % hwm_id)
data = jsonutils.loads(res.read())
return data['hwm']
def list_hwm(self, **kwargs):
""" """
params = self._extract_params(kwargs, hwms.SUPPORTED_PARAMS)
res = self.do_request("GET", "/hwm", params=params)
data = jsonutils.loads(res.read())
return data
def get_hwm_detail(self, hwm_id):
res = self.do_request("GET", "/hwm/%s" % hwm_id)
data = jsonutils.loads(res.read())
return data['hwm']
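A sketch of how the removed hwm client calls above chained together; the constructor arguments passed to RegistryClient here are assumptions, not documented defaults:

# Registry host and port below are assumptions.
client = RegistryClient('127.0.0.1', 19191)
hwm = client.add_hwm({'hwm_ip': '10.0.0.8', 'description': 'rack-1 hwm'})
client.update_hwm(hwm['id'], {'description': 'rack-1 hwm (primary)'})
print(client.list_hwm(limit=5))
client.delete_hwm(hwm['id'])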
def add_host_template(self, template):
""" """
headers = {


@ -32,8 +32,6 @@ from daisyclient.v1.uninstall import UninstallManager
from daisyclient.v1.update import UpdateManager
from daisyclient.v1.disk_array import DiskArrayManager
from daisyclient.v1.template import TemplateManager
from daisyclient.v1.hwm_nodes import NodeManager
from daisyclient.v1.hwms import HwmManager
from daisyclient.v1.backup_restore import BackupRestoreManager
@ -69,6 +67,4 @@ class Client(object):
self.update = UpdateManager(self.http_client)
self.disk_array = DiskArrayManager(self.http_client)
self.template = TemplateManager(self.http_client)
self.node = NodeManager(self.http_client)
self.hwm = HwmManager(self.http_client)
self.backup_restore = BackupRestoreManager(self.http_client)


@ -28,8 +28,7 @@ UPDATE_PARAMS = (
'dns_nameservers', 'net_l23_provider', 'base_mac', 'internal_gateway',
'internal_cidr', 'external_cidr', 'gre_id_range', 'vlan_range',
'vni_range', 'segmentation_type', 'public_vip', 'logic_networks',
'networking_parameters', 'routers', 'auto_scale', 'use_dns',
'hwm_ip'
'networking_parameters', 'routers', 'auto_scale', 'use_dns'
)
CREATE_PARAMS = (
@ -37,8 +36,7 @@ CREATE_PARAMS = (
'dns_nameservers', 'net_l23_provider', 'base_mac', 'internal_gateway',
'internal_cidr', 'external_cidr', 'gre_id_range', 'vlan_range',
'vni_range', 'segmentation_type', 'public_vip', 'logic_networks',
'networking_parameters', 'routers', 'auto_scale', 'use_dns',
'hwm_ip'
'networking_parameters', 'routers', 'auto_scale', 'use_dns'
)
DEFAULT_PAGE_SIZE = 20


@ -29,7 +29,7 @@ UPDATE_PARAMS = ('name', 'resource_type', 'dmi_uuid', 'role', 'cluster',
'os_status', 'interfaces', 'is_deployment',
'description', 'deleted', 'status', 'ipmi_user',
'ipmi_passwd', 'ipmi_addr', 'ip', 'status', 'user',
'passwd', 'hwm_id', 'hwm_ip', 'cluster_id',
'passwd', 'cluster_id',
'vcpu_pin_set', 'dvs_high_cpuset', 'pci_high_cpuset',
'os_cpus', 'dvs_cpus', 'config_set_id', 'system',
'cpu', 'memory', 'disk', 'devices', 'pci')
@ -38,7 +38,7 @@ CREATE_PARAMS = ('id', 'name', 'description', 'resource_type', 'dmi_uuid',
'role', 'cluster', 'os_version', 'os_status',
'interfaces', 'is_deployment', 'status', 'ipmi_user',
'ipmi_passwd', 'ipmi_addr', 'ip', 'status', 'user',
'passwd', 'hwm_id', 'hwm_ip', 'cluster_id',
'passwd', 'cluster_id',
'vcpu_pin_set', 'dvs_high_cpuset', 'pci_high_cpuset',
'os_cpus', 'dvs_cpus', 'config_set_id', 'system',
'cpu', 'memory', 'disk', 'devices', 'pci')
@ -316,23 +316,6 @@ class HostManager(base.ManagerWithFind):
return Host(self, self._format_host_meta_for_user(body))
def get_min_mac(self, hwm_id):
params = dict()
resp, body = self.client.get('/v1/nodes')
hosts = body.get('nodes')
if hosts:
for host in hosts:
if hwm_id == host.get('hwm_id'):
resp, host_body = self.client.get('/v1/nodes/%s' %
host['id'])
interfaces = host_body['host'].get('interfaces')
if interfaces:
mac_list = [interface['mac'] for interface in
interfaces if interface.get('mac')]
if mac_list:
params['mac'] = min(mac_list)
return params
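Stripped of the HTTP plumbing, the removed get_min_mac just picks the lowest MAC across a host's interfaces to key discovery; its core as a standalone sketch:

def min_mac(interfaces):
    # Lowest MAC string among interfaces that actually carry one,
    # or None when the host reports no usable NIC.
    macs = [nic['mac'] for nic in interfaces or [] if nic.get('mac')]
    return min(macs) if macs else None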
def add_discover_host(self, **kwargs):
"""Add a discover host
@ -349,9 +332,6 @@ class HostManager(base.ManagerWithFind):
msg = 'create() got an unexpected keyword argument \'%s\''
raise TypeError(msg % field)
hwm_id = fields.get('hwm_id')
params = self.get_min_mac(hwm_id)
fields['mac'] = params.get('mac')
hdrs = self._host_meta_to_headers(fields)
resp, body = self.client.post('/v1/discover/nodes',


@ -1,407 +0,0 @@
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import copy
from oslo_utils import encodeutils
from oslo_utils import strutils
import six
from daisyclient.common import utils
from daisyclient.openstack.common.apiclient import base
from daisyclient.common.http import HTTPClient
reload(sys)
sys.setdefaultencoding('utf-8')
DEFAULT_PAGE_SIZE = 200
SORT_DIR_VALUES = ('asc', 'desc')
SORT_KEY_VALUES = ('serialNo', 'created_at', 'updated_at', 'status')
OS_REQ_ID_HDR = 'x-openstack-request-id'
class Node(base.Resource):
def __repr__(self):
return "<Node %s>" % self._info
def update(self, **fields):
self.manager.update(self, **fields)
def delete(self, **kwargs):
return self.manager.delete(self)
def data(self, **kwargs):
return self.manager.data(self, **kwargs)
class NodeManager(base.ManagerWithFind):
resource_class = Node
def get_hwm_client(self, hwm_ip):
if hwm_ip:
endpoint = "http://" + hwm_ip + ":8089"
client = HTTPClient(endpoint)
else:
client = self.client
return client
def _list(self, url, hwm_ip, response_key, obj_class=None, body=None):
hwm_client = self.get_hwm_client(hwm_ip)
resp, body = hwm_client.get(url)
if obj_class is None:
obj_class = self.resource_class
data = body[response_key]
return ([obj_class(self, res, loaded=True) for res in data if res],
resp)
def _host_meta_from_headers(self, headers):
meta = {'properties': {}}
safe_decode = encodeutils.safe_decode
for key, value in six.iteritems(headers):
value = safe_decode(value, incoming='utf-8')
if key.startswith('x-image-meta-property-'):
_key = safe_decode(key[22:], incoming='utf-8')
meta['properties'][_key] = value
elif key.startswith('x-image-meta-'):
_key = safe_decode(key[13:], incoming='utf-8')
meta[_key] = value
for key in ['is_public', 'protected', 'deleted']:
if key in meta:
meta[key] = strutils.bool_from_string(meta[key])
return self._format_host_meta_for_user(meta)
def _host_meta_to_headers(self, fields):
headers = {}
fields_copy = copy.deepcopy(fields)
# NOTE(flaper87): Convert to str, headers
# that are not instance of basestring. All
# headers will be encoded later, before the
# request is sent.
for key, value in six.iteritems(fields_copy):
headers['%s' % key] = utils.to_str(value)
return headers
@staticmethod
def _format_host_meta_for_user(meta):
for key in ['size', 'min_ram', 'min_disk']:
if key in meta:
try:
meta[key] = int(meta[key]) if meta[key] else 0
except ValueError:
pass
return meta
def _build_params(self, parameters):
params = {'limit': parameters.get('page_size', DEFAULT_PAGE_SIZE)}
if 'marker' in parameters:
params['marker'] = parameters['marker']
sort_key = parameters.get('sort_key')
if sort_key is not None:
if sort_key in SORT_KEY_VALUES:
params['sort_key'] = sort_key
else:
raise ValueError('sort_key must be one of the following: %s.'
% ', '.join(SORT_KEY_VALUES))
sort_dir = parameters.get('sort_dir')
if sort_dir is not None:
if sort_dir in SORT_DIR_VALUES:
params['sort_dir'] = sort_dir
else:
raise ValueError('sort_dir must be one of the following: %s.'
% ', '.join(SORT_DIR_VALUES))
filters = parameters.get('filters', {})
params.update(filters)
return params
def list(self, **kwargs):
"""Get a list of nodes.
:param page_size: number of items to request in each paginated request
:param limit: maximum number of nodes to return
:param marker: begin returning nodes that appear later in the node
list than that represented by this node id
:param filters: dict of direct comparison filters that mimics the
structure of a node object
:param return_request_id: If an empty list is provided, populate this
list with the request ID value from the header
x-openstack-request-id
:rtype: list of :class:`Node`
"""
absolute_limit = kwargs.get('limit')
page_size = kwargs.get('page_size', DEFAULT_PAGE_SIZE)
def paginate(qp, return_request_id=None):
for param, value in six.iteritems(qp):
if isinstance(value, six.string_types):
# Note(flaper87) Url encoding should
# be moved inside http utils, at least
# shouldn't be here.
#
# Making sure all params are str before
# trying to encode them
qp[param] = encodeutils.safe_decode(value)
hwm_ip = kwargs.get('hwm_ip')
url = '/api/v1.0/hardware/nodes'
nodes, resp = self._list(url, hwm_ip, "nodes")
if return_request_id is not None:
return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None))
for node in nodes:
yield node
return_request_id = kwargs.get('return_req_id', None)
params = self._build_params(kwargs)
seen = 0
while True:
seen_last_page = 0
filtered = 0
for host in paginate(params, return_request_id):
last_host = host.serialNo
if (absolute_limit is not None and
seen + seen_last_page >= absolute_limit):
# Note(kragniz): we've seen enough hosts
return
else:
seen_last_page += 1
yield host
seen += seen_last_page
if seen_last_page + filtered == 0:
# Note(kragniz): we didn't get any hosts in the last page
return
if absolute_limit is not None and seen >= absolute_limit:
# Note(kragniz): reached the limit of hosts to return
return
if page_size and seen_last_page + filtered < page_size:
# Note(kragniz): we've reached the last page of the hosts
return
# Note(kragniz): there are more hosts to come
params['marker'] = last_host
seen_last_page = 0
def location(self, **kwargs):
"""Get location of node."""
hwm_ip = kwargs.get('hwm_ip')
hwm_id = kwargs.get('hwm_id')
hwm_client = self.get_hwm_client(hwm_ip)
url = '/api/v1.0/hardware/nodes/%s/location' % hwm_id
resp, body = hwm_client.get(url)
return_request_id = kwargs.get('return_req_id', None)
if return_request_id is not None:
return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None))
return Node(self, self._format_host_meta_for_user(body))
def restart(self, **kwargs):
"""Restart node."""
hdrs = {}
hwm_ip = kwargs.get('hwm_ip')
hwm_id = kwargs.get('hwm_id')
hwm_client = self.get_hwm_client(hwm_ip)
url = '/api/v1.0/hardware/nodes/%s/restart_actions' % hwm_id
resp, body = hwm_client.post(url, headers=hdrs, data=hdrs)
return_request_id = kwargs.get('return_req_id', None)
if return_request_id is not None:
return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None))
return Node(self, self._format_host_meta_for_user(body))
def restart_state(self, **kwargs):
"""Get restart state of node."""
hwm_ip = kwargs.get('hwm_ip')
action_id = kwargs.get('action_id')
hwm_client = self.get_hwm_client(hwm_ip)
url = '/api/v1.0/hardware/nodes/restart_actions/%s' % action_id
resp, body = hwm_client.get(url)
return_request_id = kwargs.get('return_req_id', None)
if return_request_id is not None:
return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None))
return Node(self, self._format_host_meta_for_user(body))
def set_boot(self, **kwargs):
"""Set boot type of node."""
hdrs = {}
hwm_ip = kwargs.get('hwm_ip')
hwm_id = kwargs.get('hwm_id')
boot_type = kwargs.get('boot_type')
hwm_client = self.get_hwm_client(hwm_ip)
url = '/api/v1.0/hardware/nodes/%s/one_time_boot?from=%s' % \
(hwm_id, boot_type)
resp, body = hwm_client.post(url, headers=hdrs, data=hdrs)
return_request_id = kwargs.get('return_req_id', None)
if return_request_id is not None:
return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None))
return Node(self, self._format_host_meta_for_user(body))
def update(self, **kwargs):
"""Update hosts."""
absolute_limit = kwargs.get('limit')
page_size = kwargs.get('page_size', DEFAULT_PAGE_SIZE)
hwm_ip = kwargs.get('hwm_ip')
hwm_client = self.get_hwm_client(hwm_ip)
hwm_url = '/api/v1.0/hardware/nodes'
hwm_resp, hwm_body = hwm_client.get(hwm_url)
hwm_body['hwm_ip'] = hwm_ip
def paginate(qp, return_request_id=None):
for param, value in six.iteritems(qp):
if isinstance(value, six.string_types):
# Note(flaper87) Url encoding should
# be moved inside http utils, at least
# shouldn't be here.
#
# Making sure all params are str before
# trying to encode them
qp[param] = encodeutils.safe_decode(value)
hdrs = self._host_meta_to_headers(hwm_body)
url = '/v1/hwm_nodes'
resp, body = self.client.post(url, headers={}, data=hdrs)
obj_class = self.resource_class
hosts = [obj_class(self, res, loaded=True) for res in body['nodes']
if res]
if return_request_id is not None:
return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None))
for host in hosts:
yield host
return_request_id = kwargs.get('return_req_id', None)
params = self._build_params(kwargs)
seen = 0
while True:
seen_last_page = 0
filtered = 0
for host in paginate(params, return_request_id):
last_host = host.id
if (absolute_limit is not None and
seen + seen_last_page >= absolute_limit):
# Note(kragniz): we've seen enough hosts
return
else:
seen_last_page += 1
yield host
seen += seen_last_page
if seen_last_page + filtered == 0:
# Note(kragniz): we didn't get any hosts in the last page
return
if absolute_limit is not None and seen >= absolute_limit:
# Note(kragniz): reached the limit of hosts to return
return
if page_size and seen_last_page + filtered < page_size:
# Note(kragniz): we've reached the last page of the hosts
return
# Note(kragniz): there are more hosts to come
params['marker'] = last_host
seen_last_page = 0
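Both generators above repeat the same seen/absolute_limit/marker bookkeeping. The paging pattern, condensed into a standalone sketch with fetch_page standing in for the HTTP call:

def paged(fetch_page, page_size=200, limit=None):
    # Generic client-side pager: yield items until an absolute limit is
    # reached or the server returns a short page, advancing the marker.
    seen, marker = 0, None
    while True:
        page = list(fetch_page(marker=marker, limit=page_size))
        for item in page:
            if limit is not None and seen >= limit:
                return
            seen += 1
            yield item
        if len(page) < page_size:
            return
        marker = page[-1].id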
def cloud_state(self, **kwargs):
"""To inform provider the cloud state."""
hdrs = dict()
fields = dict()
provider_ip = kwargs.pop('provider_ip')
operation = kwargs.get('operation')
fields["envName"] = kwargs.get('name')
fields["envUrl"] = kwargs.get('url')
hwm_url = '/v1/hwm'
resp, hwm_body = self.client.get(hwm_url)
hwms_ip = [hwm['hwm_ip'] for hwm in hwm_body['hwm']]
if provider_ip in hwms_ip:
url = '/api/envChangeNotification'
provider_client = self.get_hwm_client(provider_ip)
if operation == "add":
hdrs = {"add_environment": fields}
if operation == "delete":
hdrs = {"delete_environment": fields}
resp, body = provider_client.post(url, data=hdrs)
else:
return
def get_min_mac(self, hwm_id):
params = dict()
resp, body = self.client.get('/v1/nodes')
hosts = body.get('nodes')
if hosts:
for host in hosts:
if hwm_id == host.get('hwm_id'):
params['host_id'] = host['id']
resp, host_body = self.client.get('/v1/nodes/%s' %
host['id'])
interfaces = host_body['host'].get('interfaces')
if interfaces:
mac_list = [interface['mac'] for interface in
interfaces if interface.get('mac')]
if mac_list:
params['mac'] = min(mac_list)
return params
def pxe_host_discover(self, **kwargs):
"""Pxe host discover."""
hdrs = dict()
hwm_ip = kwargs.get('hwm_ip')
hwm_id = kwargs.get('hwm_id')
hwm_client = self.get_hwm_client(hwm_ip)
pxe_url = '/api/v1.0/hardware/nodes/%s/one_time_boot?from=pxe' % \
hwm_id
resp, pxe_body = hwm_client.post(pxe_url, headers=hdrs, data=hdrs)
params = self.get_min_mac(hwm_id)
params['status'] = "DISCOVERING"
resp, body = self.client.post(
'/v1/pxe_discover/nodes', headers=params, data=params)
restart_url = '/api/v1.0/hardware/nodes/%s/restart_actions' % \
hwm_id
resp, restart_body = hwm_client.post(restart_url, headers=hdrs,
data=hdrs)
return_request_id = kwargs.get('return_req_id', None)
if return_request_id is not None:
return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None))
return Node(self, self._format_host_meta_for_user(restart_body))


@ -1,248 +0,0 @@
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_utils import encodeutils
from oslo_utils import strutils
import six
import six.moves.urllib.parse as urlparse
from daisyclient.common import utils
from daisyclient.openstack.common.apiclient import base
UPDATE_PARAMS = ('hwm_ip', 'description')
CREATE_PARAMS = ('id', 'hwm_ip', 'description')
DEFAULT_PAGE_SIZE = 20
SORT_DIR_VALUES = ('asc', 'desc')
SORT_KEY_VALUES = ('name', 'created_at', 'updated_at')
OS_REQ_ID_HDR = 'x-openstack-request-id'
class Hwm(base.Resource):
def __repr__(self):
return "<Hwm %s>" % self._info
def update(self, **fields):
self.manager.update(self, **fields)
def delete(self, **kwargs):
return self.manager.delete(self)
def data(self, **kwargs):
return self.manager.data(self, **kwargs)
class HwmManager(base.ManagerWithFind):
resource_class = Hwm
def _list(self, url, response_key, obj_class=None, body=None):
resp, body = self.client.get(url)
if obj_class is None:
obj_class = self.resource_class
data = body[response_key]
return ([obj_class(self, res, loaded=True) for res in data if res],
resp)
def _service_meta_from_headers(self, headers):
meta = {'properties': {}}
safe_decode = encodeutils.safe_decode
for key, value in six.iteritems(headers):
value = safe_decode(value, incoming='utf-8')
if key.startswith('x-image-meta-property-'):
_key = safe_decode(key[22:], incoming='utf-8')
meta['properties'][_key] = value
elif key.startswith('x-image-meta-'):
_key = safe_decode(key[13:], incoming='utf-8')
meta[_key] = value
for key in ['is_public', 'protected', 'deleted']:
if key in meta:
meta[key] = strutils.bool_from_string(meta[key])
return self._format_template_meta_for_user(meta)
def _template_meta_to_headers(self, fields):
headers = {}
fields_copy = copy.deepcopy(fields)
# NOTE(flaper87): Convert to str, headers
# that are not instance of basestring. All
# headers will be encoded later, before the
# request is sent.
for key, value in six.iteritems(fields_copy):
headers['%s' % key] = utils.to_str(value)
return headers
@staticmethod
def _format_image_meta_for_user(meta):
for key in ['size', 'min_ram', 'min_disk']:
if key in meta:
try:
meta[key] = int(meta[key]) if meta[key] else 0
except ValueError:
pass
return meta
@staticmethod
def _format_template_meta_for_user(meta):
for key in ['size', 'min_ram', 'min_disk']:
if key in meta:
try:
meta[key] = int(meta[key]) if meta[key] else 0
except ValueError:
pass
return meta
def _build_params(self, parameters):
params = {'limit': parameters.get('page_size', DEFAULT_PAGE_SIZE)}
if 'marker' in parameters:
params['marker'] = parameters['marker']
sort_key = parameters.get('sort_key')
if sort_key is not None:
if sort_key in SORT_KEY_VALUES:
params['sort_key'] = sort_key
else:
raise ValueError('sort_key must be one of the following: %s.'
% ', '.join(SORT_KEY_VALUES))
sort_dir = parameters.get('sort_dir')
if sort_dir is not None:
if sort_dir in SORT_DIR_VALUES:
params['sort_dir'] = sort_dir
else:
raise ValueError('sort_dir must be one of the following: %s.'
% ', '.join(SORT_DIR_VALUES))
filters = parameters.get('filters', {})
params.update(filters)
return params
def get(self, hwm_id):
"""get hwm information by id."""
url = "/v1/hwm/%s" % base.getid(hwm_id)
resp, body = self.client.get(url)
return Hwm(self, self._format_template_meta_for_user(body['hwm']))
def list(self, **kwargs):
"""Get a list of hwm.
:param page_size: number of items to request in each paginated request
:param limit: maximum number of hwms to return
:param marker: begin returning hwms that appear later in the hwm
list than that represented by this hwm id
:param filters: dict of direct comparison filters that mimics the
structure of an hwm object
:param return_request_id: If an empty list is provided, populate this
list with the request ID value from the header
x-openstack-request-id
:rtype: list of :class:`Hwm`
"""
absolute_limit = kwargs.get('limit')
def paginate(qp, return_request_id=None):
for param, value in six.iteritems(qp):
if isinstance(value, six.string_types):
# Note(flaper87) Url encoding should
# be moved inside http utils, at least
# shouldn't be here.
#
# Making sure all params are str before
# trying to encode them
qp[param] = encodeutils.safe_decode(value)
url = '/v1/hwm?%s' % urlparse.urlencode(qp)
hwms, resp = self._list(url, "hwm")
if return_request_id is not None:
return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None))
for hwm in hwms:
yield hwm
return_request_id = kwargs.get('return_req_id', None)
params = self._build_params(kwargs)
seen = 0
seen_last_page = 0
for hwm in paginate(params, return_request_id):
if (absolute_limit is not None and
seen + seen_last_page >= absolute_limit):
# Note(kragniz): we've seen enough hwms
return
else:
seen_last_page += 1
yield hwm
def add(self, **kwargs):
"""Add a hwm.
TODO(bcwaldon): document accepted params
"""
fields = {}
for field in kwargs:
if field in CREATE_PARAMS:
fields[field] = kwargs[field]
elif field == 'return_req_id':
continue
else:
msg = 'create() got an unexpected keyword argument \'%s\''
raise TypeError(msg % field)
hdrs = self._template_meta_to_headers(fields)
resp, body = self.client.post('/v1/hwm', headers=hdrs, data=hdrs)
return_request_id = kwargs.get('return_req_id', None)
if return_request_id is not None:
return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None))
return Hwm(self, self._format_template_meta_for_user(body['hwm']))
def delete(self, hwm_id):
"""Delete a hwm."""
url = "/v1/hwm/%s" % base.getid(hwm_id)
resp, body = self.client.delete(url)
def update(self, hwm_id, **kwargs):
"""Update an hwm"""
hdrs = {}
fields = {}
for field in kwargs:
if field in UPDATE_PARAMS:
fields[field] = kwargs[field]
elif field == 'return_req_id':
continue
else:
msg = 'update() got an unexpected keyword argument \'%s\''
raise TypeError(msg % field)
hdrs.update(self._template_meta_to_headers(fields))
url = '/v1/hwm/%s' % base.getid(hwm_id)
resp, body = self.client.put(url, headers=None, data=hdrs)
return_request_id = kwargs.get('return_req_id', None)
if return_request_id is not None:
return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None))
return Hwm(self, self._format_template_meta_for_user(body['hwm']))
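Putting the removed manager together, typical use through the high-level client looked roughly like this (client is assumed to be a daisyclient.v1.client.Client instance):

hwm = client.hwm.add(hwm_ip='10.0.0.8', description='rack-1 hwm')
for item in client.hwm.list(page_size=20, sort_key='created_at',
                            sort_dir='desc'):
    print(item.hwm_ip, item.description)
client.hwm.update(hwm.id, description='rack-1 hwm (primary)')
client.hwm.delete(hwm.id)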


@ -41,7 +41,6 @@ import daisyclient.v1.uninstall
import daisyclient.v1.update
import daisyclient.v1.disk_array
import daisyclient.v1.template
import daisyclient.v1.hwms
from daisyclient.v1 import param_helper
import daisyclient.v1.backup_restore
@ -97,10 +96,6 @@ def _daisy_show(daisy, max_column_width=80):
help='node network interface detail, \
ip must be given if assigned_networks is empty,\
and cluster must be given if assigned_networks is not empty.')
@utils.arg('--hwm-id', metavar='<HWM_ID>',
help='The id of hwm host.')
@utils.arg('--hwm-ip', metavar='<HWM_IP>',
help='The ip of hwm.')
@utils.arg('--vcpu-pin-set', metavar='<VCPU_PIN_SET>',
help='Set the vcpu pin.')
@utils.arg('--dvs-high-cpuset', metavar='<DVS_HIGH_CPUSET>',
@ -249,10 +244,6 @@ def do_host_delete(gc, args):
help='size of hugepage.')
@utils.arg('--hugepages', metavar='<HUGEPAGES>',
help='number of hugepages.')
@utils.arg('--hwm-id', metavar='<HWM_ID>',
help='The id of hwm host.')
@utils.arg('--hwm-ip', metavar='<HWM_IP>',
help='The ip of hwm.')
@utils.arg('--vcpu-pin-set', metavar='<VCPU_PIN_SET>',
help='Set the vcpu pin.')
@utils.arg('--dvs-high-cpuset', metavar='<DVS_HIGH_CPUSET>',
@ -345,9 +336,9 @@ def do_host_list(gc, args):
hosts = gc.hosts.list(**kwargs)
columns = ['ID', 'Hwm_id', 'Name', 'Description', 'Resource_type',
columns = ['ID', 'Name', 'Description', 'Resource_type',
'Status', 'Os_progress', 'Os_status', 'Discover_state',
'Messages', 'Hwm_ip']
'Messages']
# if filters.has_key('cluster_id'):
if 'cluster_id' in filters:
role_columns = ['Role_progress', 'Role_status', 'Role_messages']
@ -455,7 +446,7 @@ def do_discover_host_list(gc, args):
filters = dict([item for item in filter_items if item[1] is not None])
kwargs = {'filters': filters}
discover_hosts = gc.hosts.list_discover_host(**kwargs)
columns = ['Id', 'Mac', 'Ip', 'User', 'Passwd', 'Status', 'Message',
columns = ['Id', 'Ip', 'User', 'Passwd', 'Status', 'Message',
'Host_id', 'Cluster_id']
utils.print_list(discover_hosts, columns)
@ -692,7 +683,7 @@ def do_cluster_list(gc, args):
clusters = gc.clusters.list(**kwargs)
columns = ['ID', 'Name', 'Description', 'Nodes', 'Networks',
'Auto_scale', 'Use_dns', 'Hwm_ip', 'Status']
'Auto_scale', 'Use_dns', 'Status']
utils.print_list(clusters, columns)
@ -2308,161 +2299,6 @@ def do_delete_host_template(dc, args):
_daisy_show(host_template)
@utils.arg('--hwm-ip', metavar='<HWM_IP>', help='The ip of hwm')
def do_node_list(gc, args):
"""Get all nodes from hwm."""
fields = dict(filter(lambda x: x[1] is not None, vars(args).items()))
result = gc.node.list(**fields)
columns = ['id', 'cpuCore', 'cpuFrequency', 'memory', 'disk',
'hardwareType', 'hardwareStatus', 'interfaces']
utils.print_list(result, columns, conver_field=False)
@utils.arg('--hwm-ip', metavar='<HWM_IP>', help='The ip of hwm')
@utils.arg('hwm_id', metavar='<HWM_ID>', help='The id of hwm')
def do_node_location(gc, args):
"""Get node location from hwm."""
fields = dict(filter(lambda x: x[1] is not None, vars(args).items()))
location = gc.node.location(**fields)
_daisy_show(location)
@utils.arg('--hwm-ip', metavar='<HWM_IP>', help='The ip of hwm')
@utils.arg('hwm_id', metavar='<HWM_ID>', help='The id of hwm')
def do_node_restart(gc, args):
"""Restart node."""
fields = dict(filter(lambda x: x[1] is not None, vars(args).items()))
result = gc.node.restart(**fields)
_daisy_show(result)
@utils.arg('--hwm-ip', metavar='<HWM_IP>', help='The ip of hwm')
@utils.arg('action_id', metavar='<ACTION_ID>',
help='The action id of nodes')
def do_restart_state(gc, args):
"""Get restart state of node."""
fields = dict(filter(lambda x: x[1] is not None, vars(args).items()))
state = gc.node.restart_state(**fields)
_daisy_show(state)
@utils.arg('--hwm-ip', metavar='<HWM_IP>', help='The ip of hwm')
@utils.arg('--boot-type', metavar='<BOOT_TYPE>', help='The node boot type')
@utils.arg('hwm_id', metavar='<HWM_ID>', help='The id of hwm')
def do_set_boot(gc, args):
"""Set boot type of node."""
fields = dict(filter(lambda x: x[1] is not None, vars(args).items()))
result = gc.node.set_boot(**fields)
_daisy_show(result)
@utils.arg('--hwm-ip', metavar='<HWM_IP>', help='The ip of hwm')
def do_node_update(gc, args):
"""Update hosts."""
fields = dict(filter(lambda x: x[1] is not None, vars(args).items()))
result = gc.node.update(**fields)
columns = ['ID', 'Hwm_id', 'Hwm_ip', 'Name', 'Description',
'Resource_type', 'Status', 'Os_progress', 'Os_status',
'Messages']
utils.print_list(result, columns)
@utils.arg('hwm_id', metavar='<HWM_ID>', help='The id of hwm')
@utils.arg('--hwm-ip', metavar='<HWM_IP>', help='The ip of hwm')
def do_pxe_host_discover(gc, args):
"""Discover host with pxe."""
fields = dict(filter(lambda x: x[1] is not None, vars(args).items()))
host = gc.node.pxe_host_discover(**fields)
_daisy_show(host)
@utils.arg('hwm_ip', metavar='<HWM_IP>',
help='Hwm ip to be added.')
@utils.arg('--description', metavar='<DESCRIPTION>',
help='Hwm description to be added.')
def do_hwm_add(gc, args):
"""Add a hwm."""
fields = dict(filter(lambda x: x[1] is not None, vars(args).items()))
# Filter out values we can't use
CREATE_PARAMS = daisyclient.v1.hwms.CREATE_PARAMS
fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items()))
hwm = gc.hwm.add(**fields)
_daisy_show(hwm)
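The CREATE_PARAMS/UPDATE_PARAMS lookups whitelist which collected options may be sent to the API. A sketch of that second filtering step (the parameter tuple below mirrors the two options this command defines; treat it as an assumption):

# Whitelist filtering as used with CREATE_PARAMS/UPDATE_PARAMS above.
CREATE_PARAMS = ('hwm_ip', 'description')
fields = {'hwm_ip': '10.43.211.63', 'verbose': True}
fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items()))
print(fields)  # {'hwm_ip': '10.43.211.63'}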
@utils.arg('hwm', metavar='<HWM>', help='ID of hwm to modify.')
@utils.arg('--hwm-ip', metavar='<HWM_IP>', help='The ip of hwm.')
@utils.arg('--description', metavar='<DESCRIPTION>',
help='Description of hwm.')
def do_hwm_update(gc, args):
"""Update a specific hwm."""
fields = dict(filter(lambda x: x[1] is not None, vars(args).items()))
hwm_arg = fields.pop('hwm')
hwm = utils.find_resource(gc.hwm, hwm_arg)
# Filter out values we can't use
UPDATE_PARAMS = daisyclient.v1.hwms.UPDATE_PARAMS
fields = dict(filter(lambda x: x[0] in UPDATE_PARAMS, fields.items()))
hwm = gc.hwm.update(hwm, **fields)
_daisy_show(hwm)
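utils.find_resource() resolves the user-supplied identifier to a manager object before the update call. A toy sketch of that lookup, under the simplifying assumption that only ids are matched (the real helper also tries names; all classes here are made up):

class FakeHwm(object):
    def __init__(self, id, hwm_ip):
        self.id = id
        self.hwm_ip = hwm_ip

class FakeHwmManager(object):
    _store = {'42': FakeHwm('42', '10.43.211.63')}

    def get(self, hwm_id):
        return self._store[hwm_id]

def find_resource(manager, name_or_id):
    # Simplified: the real helper falls back to listing and name matching.
    return manager.get(name_or_id)

hwm = find_resource(FakeHwmManager(), '42')
print(hwm.hwm_ip)  # 10.43.211.63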
@utils.arg('--page-size', metavar='<SIZE>', default=None, type=int,
help='Number of hwms to request in each paginated request.')
@utils.arg('--sort-key', default=None,
choices=daisyclient.v1.hwms.SORT_KEY_VALUES,
help='Sort hwm list by specified field.')
@utils.arg('--sort-dir', default='asc',
choices=daisyclient.v1.hwms.SORT_DIR_VALUES,
help='Sort hwm list in specified direction.')
def do_hwm_list(gc, args):
"""List hwms you can access."""
kwargs = {'filters': {}}
if args.page_size is not None:
kwargs['page_size'] = args.page_size
kwargs['sort_key'] = args.sort_key
kwargs['sort_dir'] = args.sort_dir
hwms = gc.hwm.list(**kwargs)
columns = ['ID', 'Hwm_ip', 'Description']
utils.print_list(hwms, columns)
@utils.arg('id', metavar='<ID>',
help='ID of hwm to show.')
def do_hwm_detail(gc, args):
"""Show details of a specific hwm."""
hwm = utils.find_resource(gc.hwm, args.id)
_daisy_show(hwm)
@utils.arg('hwms', metavar='<HWM>', nargs='+',
help='ID of hwm(s) to delete.')
def do_hwm_delete(gc, args):
"""Delete specified hwm(s)."""
for args_hwm in args.hwms:
hwm = utils.find_resource(gc.hwm, args_hwm)
if hwm and hwm.deleted:
msg = "No hwm with an ID of '%s' exists." % hwm.id
raise exc.CommandError(msg)
try:
if args.verbose:
print('Requesting hwm delete for %s ...' %
encodeutils.safe_decode(args_hwm), end=' ')
gc.hwm.delete(hwm)
if args.verbose:
print('[Done]')
except exc.HTTPException as e:
if args.verbose:
print('[Fail]')
print('%s: Unable to delete hwm %s' % (e, args_hwm))
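The delete loop above deliberately catches HTTP errors per item so that one failed deletion does not abort the rest of the batch. A self-contained toy of the same control flow (the exception class and ids are made up):

class HTTPException(Exception):
    pass

def fake_delete(hwm_id):
    if hwm_id == 'missing':
        raise HTTPException('404 Not Found')

for hwm_id in ['42', 'missing', '43']:
    try:
        fake_delete(hwm_id)
        print('[Done] %s' % hwm_id)
    except HTTPException as e:
        print('%s: Unable to delete hwm %s' % (e, hwm_id))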
@utils.arg('--provider-ip', metavar='<PROVIDER_IP>',
help='The ip of provider.')
@utils.arg('--operation', metavar='<OPERATION>',

View File

@ -75,39 +75,6 @@ class BaseDaisyTest(tempest.test.BaseTestCase):
for cluster in clusters_list:
self.delete_cluster(cluster)
@classmethod
def add_hwm(cls, **hwm_meta):
hwm_info = cls.daisy_client.hwm.add(**hwm_meta)
return hwm_info
@classmethod
def delete_hwm(cls, hwm_meta):
cls.daisy_client.hwm.delete(hwm_meta)
@classmethod
def update_hwm(cls, hwm_id, **hwm_meta):
hwm_info = cls.daisy_client.hwm.update(hwm_id, **hwm_meta)
return hwm_info
@classmethod
def _clean_all_hwm(cls):
# hwm.list() returns a generator; materialize it before deleting.
for hwm in list(cls.daisy_client.hwm.list()):
cls.delete_hwm(hwm)
@classmethod
def list_hwm(cls, **hwm_meta):
# Pass the supplied kwargs through as the 'filters' dict that the
# client-side list() expects.
hwm_meta['filters'] = hwm_meta
hwm_list = cls.daisy_client.hwm.list(**hwm_meta)
return hwm_list
@classmethod
def get_hwm_detail(cls, hwm_meta):
hwm_detail = cls.daisy_client.hwm.get(hwm_meta)
return hwm_detail
@classmethod
def add_host(cls, **host_meta):
host_info = cls.daisy_client.hosts.add(**host_meta)

View File

@ -111,9 +111,6 @@ class TecsClusterTest(base.BaseDaisyTest):
cls.cluster_meta8 = {'description': "test_add_host7",
'name': "test_add_host7",
'auto_scale': 1}
cls.cluster_meta9 = {'description': "test_with_hwm",
'name': "test_with_hwm",
'hwm_ip': "10.43.211.63"}
def private_network_add(self):
private_network_params = self.fake.fake_private_network_parameters()
@ -262,17 +259,6 @@ class TecsClusterTest(base.BaseDaisyTest):
# cluster = self.get_cluster(cluster_info.id)
self.delete_cluster(cluster_info.id)
def test_update_cluster_with_hwm(self):
self.private_network_add()
cluster_info = self.add_cluster(**self.cluster_meta1)
hwm_meta = {"hwm_ip": "10.43.211.63"}
cluster_update_info = self.update_cluster(cluster_info.id,
**hwm_meta)
self.assertEqual("10.43.211.63",
cluster_update_info.hwm_ip,
"Update cluster with hwm_ip failed")
self.delete_cluster(cluster_info.id)
def test_update_cluster_with_networking_parameters_add_router(self):
""" """
self.private_network_add()
@ -460,15 +446,6 @@ class TecsClusterTest(base.BaseDaisyTest):
"===============")
self.delete_cluster(cluster_info.id)
def test_add_cluster_with_hwm(self):
self.private_network_add()
cluster_info = self.add_cluster(**self.cluster_meta9)
if cluster_info:
self.assertEqual(self.cluster_meta9['hwm_ip'],
cluster_info.hwm_ip,
"Add cluster with hwm_ip failed")
self.delete_cluster(cluster_info.id)
def tearDown(self):
if self.cluster_meta1.get('nodes', None):
del self.cluster_meta1['nodes']

View File

@ -1,61 +0,0 @@
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.daisy import base
from tempest import config
from fake.logical_network_fake import FakeLogicNetwork as logical_fake
CONF = config.CONF
class DaisyHwmTest(base.BaseDaisyTest):
@classmethod
def resource_setup(cls):
super(DaisyHwmTest, cls).resource_setup()
cls.fake = logical_fake()
cls.hwm_meta = {'hwm_ip': '10.43.211.63',
'description': 'the first hwm'}
def test_add_hwm(self):
hwm = self.add_hwm(**self.hwm_meta)
self.assertEqual("10.43.211.63", hwm.hwm_ip, "add-hwm failed")
def test_update_hwm(self):
update_hwm_meta = {'hwm_ip': '10.43.174.11'}
add_hwm = self.add_hwm(**self.hwm_meta)
update_hwm = self.update_hwm(add_hwm.id, **update_hwm_meta)
self.assertEqual("10.43.174.11", update_hwm.hwm_ip,
"update-hwm failed")
def test_hwm_detail_info(self):
add_hwm = self.add_hwm(**self.hwm_meta)
hwm_detail = self.get_hwm_detail(add_hwm.id)
self.assertEqual("10.43.211.63", hwm_detail.hwm_ip,
"test_hwm_detail_info failed")
def test_hwm_list(self):
self.add_hwm(**self.hwm_meta)
hwms = self.list_hwm()
for hwm in hwms:
self.assertIsNotNone(hwm)
def test_hwm_delete(self):
hwm = self.add_hwm(**self.hwm_meta)
self.delete_hwm(hwm.id)
def tearDown(self):
self._clean_all_hwm()
super(DaisyHwmTest, self).tearDown()