diff --git a/code/daisy/daisy/api/backends/common.py b/code/daisy/daisy/api/backends/common.py
index 8db42988..f58c676c 100644
--- a/code/daisy/daisy/api/backends/common.py
+++ b/code/daisy/daisy/api/backends/common.py
@@ -40,6 +40,48 @@ kolla_backend_name = "kolla"
 os_install_start_time = 0.0
 
+# This is used for mapping daisy service id to systemctl service names.
+# Only used by non-containerized deploy tools such as clush/puppet.
+
+service_map = {
+    'lb': 'haproxy',
+    'mongodb': 'mongod',
+    'ha': '',
+    'mariadb': 'mariadb',
+    'amqp': 'rabbitmq-server',
+    'ceilometer-api': 'openstack-ceilometer-api',
+    'ceilometer-collector': 'openstack-ceilometer-collector,\
+        openstack-ceilometer-mend',
+    'ceilometer-central': 'openstack-ceilometer-central',
+    'ceilometer-notification': 'openstack-ceilometer-notification',
+    'ceilometer-alarm': 'openstack-ceilometer-alarm-evaluator,\
+        openstack-ceilometer-alarm-notifier',
+    'heat-api': 'openstack-heat-api',
+    'heat-api-cfn': 'openstack-heat-api-cfn',
+    'heat-engine': 'openstack-heat-engine',
+    'ironic': 'openstack-ironic-api,openstack-ironic-conductor',
+    'horizon': 'httpd,opencos-alarmmanager',
+    'keystone': 'openstack-keystone',
+    'glance': 'openstack-glance-api,openstack-glance-registry',
+    'cinder-volume': 'openstack-cinder-volume',
+    'cinder-scheduler': 'openstack-cinder-scheduler',
+    'cinder-api': 'openstack-cinder-api',
+    'neutron-metadata': 'neutron-metadata-agent',
+    'neutron-lbaas': 'neutron-lbaas-agent',
+    'neutron-dhcp': 'neutron-dhcp-agent',
+    'neutron-server': 'neutron-server',
+    'neutron-l3': 'neutron-l3-agent',
+    'compute': 'openstack-nova-compute',
+    'nova-cert': 'openstack-nova-cert',
+    'nova-sched': 'openstack-nova-scheduler',
+    'nova-vncproxy': 'openstack-nova-novncproxy,openstack-nova-consoleauth',
+    'nova-conductor': 'openstack-nova-conductor',
+    'nova-api': 'openstack-nova-api',
+    'nova-cells': 'openstack-nova-cells',
+    'camellia-api': 'camellia-api'
+}
+
+
 def subprocess_call(command, file=None):
     if file:
         return_code = subprocess.call(command,
@@ -366,3 +408,168 @@ def calc_host_iqn(min_mac):
     get_uuid = stdoutput.split('=')[1]
     iqn = "iqn.opencos.rh:" + get_uuid.strip()
     return iqn
+
+
+def _get_cluster_network(cluster_networks, network_type):
+    network = [cn for cn in cluster_networks if cn['name'] in network_type]
+    if not network or not network[0]:
+        msg = "network %s does not exist" % (network_type)
+        raise exception.InvalidNetworkConfig(msg)
+    else:
+        return network[0]
+
+
+def get_host_interface_by_network(host_detail, network_type):
+    host_detail_info = copy.deepcopy(host_detail)
+    interface_list = [hi for hi in host_detail_info['interfaces']
+                      for assigned_network in hi['assigned_networks']
+                      if assigned_network and
+                      network_type == assigned_network['name']]
+    interface = {}
+    if interface_list:
+        interface = interface_list[0]
+    if not interface:
+        msg = "network %s of host %s does not exist" % (
+            network_type, host_detail_info['id'])
+        raise exception.InvalidNetworkConfig(msg)
+    return interface
+
+
+def get_host_network_ip(req, host_detail, cluster_networks, network_name):
+    interface_network_ip = ''
+    host_interface = get_host_interface_by_network(host_detail, network_name)
+    if host_interface:
+        network = _get_cluster_network(cluster_networks, network_name)
+        assigned_network = get_assigned_network(req,
+                                                host_interface['id'],
+                                                network['id'])
+        interface_network_ip = assigned_network['ip']
+
+    if not interface_network_ip and 'MANAGEMENT' == network_name:
+        msg = "%s network ip of host %s can't be empty" % (
+            network_name, host_detail['id'])
+        raise exception.InvalidNetworkConfig(msg)
+    return interface_network_ip
+
+
+def get_service_disk_list(req, params):
+    try:
+        service_disks = registry.list_service_disk_metadata(
+            req.context, **params)
+    except exception.Invalid as e:
+        raise HTTPBadRequest(explanation=e.msg, request=req)
+    return service_disks
+
+
+def sort_interfaces_by_pci(networks, host_detail):
+    """
+    Sort interfaces by pci segment; if the interface type is bond,
+    use the pci of the first member nic. This works around the name
+    length limit of ovs virtual ports: if a port name is longer than
+    15 characters, creating the port will fail.
+    :param networks: networks info of the cluster
+    :param host_detail: host detail info, including its interfaces
+    :return:
+    """
+    interfaces = eval(host_detail.get('interfaces', None)) \
+        if isinstance(host_detail, unicode) else \
+        host_detail.get('interfaces', None)
+    if not interfaces:
+        LOG.info("This host has no interfaces info.")
+        return host_detail
+
+    tmp_interfaces = copy.deepcopy(interfaces)
+
+    slaves_name_list = []
+    for interface in tmp_interfaces:
+        if interface.get('type', None) == "bond" and\
+                interface.get('slave1', None) and\
+                interface.get('slave2', None):
+            slaves_name_list.append(interface['slave1'])
+            slaves_name_list.append(interface['slave2'])
+
+    for interface in interfaces:
+        if interface.get('name') not in slaves_name_list:
+            vlan_id_len_list = [len(network['vlan_id'])
+                                for assigned_network in interface.get(
+                                    'assigned_networks', [])
+                                for network in networks
+                                if assigned_network.get('name') ==
+                                network.get('name') and network.get('vlan_id')]
+            max_vlan_id_len = max(vlan_id_len_list) if vlan_id_len_list else 0
+            interface_name_len = len(interface['name'])
+            redundant_bit = interface_name_len + max_vlan_id_len - 14
+            interface['name'] = interface['name'][
+                redundant_bit:] if redundant_bit > 0 else interface['name']
+    return host_detail
+
+
+def run_scrip(script, ip=None, password=None, msg=None):
+    try:
+        _run_scrip(script, ip, password)
+    except:
+        msg1 = 'Error occurred while running scripts.'
+        message = msg1 + msg if msg else msg1
+        LOG.error(message)
+        raise HTTPForbidden(explanation=message)
+    else:
+        LOG.info('Running scripts successfully!')
+
+
+def _run_scrip(script, ip=None, password=None):
+    mask_list = []
+    repl_list = [("'", "'\\''")]
+    script = "\n".join(script)
+    _PIPE = subprocess.PIPE
+    if ip:
+        cmd = ["sshpass", "-p", "%s" % password,
+               "ssh", "-o StrictHostKeyChecking=no",
+               "%s" % ip, "bash -x"]
+    else:
+        cmd = ["bash", "-x"]
+    environ = os.environ
+    environ['LANG'] = 'en_US.UTF8'
+    obj = subprocess.Popen(cmd, stdin=_PIPE, stdout=_PIPE, stderr=_PIPE,
+                           close_fds=True, shell=False, env=environ)
+
+    script = "function t(){ exit $? ; } \n trap t ERR \n" + script
+    out, err = obj.communicate(script)
+    masked_out = mask_string(out, mask_list, repl_list)
+    masked_err = mask_string(err, mask_list, repl_list)
+    if obj.returncode:
+        pattern = (r'^ssh\:')
+        if re.search(pattern, err):
+            LOG.error(_("Network error occurred when running script."))
+            raise exception.NetworkError(masked_err, stdout=out, stderr=err)
+        else:
+            msg = ('Failed to run remote script, stdout: %s\nstderr: %s' %
+                   (masked_out, masked_err))
+            LOG.error(msg)
+            raise exception.ScriptRuntimeError(msg, stdout=out, stderr=err)
+    return obj.returncode, out
+
+
+def get_ctl_ha_nodes_min_mac(req, cluster_id):
+    '''
+    ctl_ha_nodes_min_mac = {'host_name1':'min_mac1', ...}
+    '''
+    ctl_ha_nodes_min_mac = {}
+    roles = get_cluster_roles_detail(req, cluster_id)
+    cluster_networks =\
+        get_cluster_networks_detail(req, cluster_id)
+    for role in roles:
+        if role['deployment_backend'] != tecs_backend_name:
+            continue
+        role_hosts = get_hosts_of_role(req, role['id'])
+        for role_host in role_hosts:
+            # hosts that already have TECS installed are excluded
+            if (role_host['status'] == TECS_STATE['ACTIVE'] or
+                    role_host['status'] == TECS_STATE['UPDATING'] or
+                    role_host['status'] == TECS_STATE['UPDATE_FAILED']):
+                continue
+            host_detail = get_host_detail(req,
+                                          role_host['host_id'])
+            host_name = host_detail['name']
+            if role['name'] == "CONTROLLER_HA":
+                min_mac = utils.get_host_min_mac(host_detail['interfaces'])
+                ctl_ha_nodes_min_mac[host_name] = min_mac
+    return ctl_ha_nodes_min_mac
diff --git a/code/daisy/daisy/api/backends/os.py b/code/daisy/daisy/api/backends/os.py
index a75fc5fa..8c791b0a 100644
--- a/code/daisy/daisy/api/backends/os.py
+++ b/code/daisy/daisy/api/backends/os.py
@@ -14,7 +14,7 @@
 # under the License.
 
 """
-/install endpoint for tecs API
+/install endpoint for daisy API
 """
 import copy
 import subprocess
@@ -24,7 +24,6 @@ import json
 from oslo_config import cfg
 from oslo_log import log as logging
 from webob.exc import HTTPBadRequest
-import threading
 
 from daisy import i18n
 
@@ -32,13 +31,9 @@ from daisy.common import exception
 from daisy.api import common
 from daisy.common import utils
 import daisy.registry.client.v1.api as registry
-from daisyclient.v1 import client as daisy_client
 import daisy.api.backends.common as daisy_cmn
-import daisy.api.backends.tecs.common as tecs_cmn
-import ConfigParser
-
 
 LOG = logging.getLogger(__name__)
 _ = i18n._
 _LE = i18n._LE
@@ -72,17 +67,6 @@ LINUX_BOND_MODE = {'balance-rr': '0', 'active-backup': '1',
                    '802.3ad': '4', 'balance-tlb': '5', 'balance-alb': '6'}
 
-daisy_tecs_path = tecs_cmn.daisy_tecs_path
-
-
-def get_daisyclient():
-    """Get Daisy client instance."""
-    config_daisy = ConfigParser.ConfigParser()
-    config_daisy.read("/etc/daisy/daisy-api.conf")
-    daisy_port = config_daisy.get("DEFAULT", "bind_port")
-    args = {'version': 1.0, 'endpoint': 'http://127.0.0.1:' + daisy_port}
-    return daisy_client.Client(**args)
-
 
 def pxe_server_build(req, install_meta):
     params = {'filters': {'type': 'system'}}
@@ -179,7 +163,7 @@ def _get_network_plat(req, host_config, cluster_networks, dhcp_mac):
             alias.append(cluster_network['alias'])
         # convert cidr to netmask
         cidr_to_ip = ""
-        assigned_networks_ip = tecs_cmn.get_host_network_ip(
+        assigned_networks_ip = daisy_cmn.get_host_network_ip(
             req, host_config_orig, cluster_networks, network_name)
         if cluster_network.get('cidr', None):
             inter_ip = lambda x: '.'.join(
@@ -243,7 +227,7 @@ def get_cluster_hosts_config(req, cluster_id):
                     role['name'] in host_detail['role'] and\
                     role['nova_lv_size']:
                 host_detail['nova_lv_size'] = role['nova_lv_size']
-        service_disks = tecs_cmn.get_service_disk_list(
+        service_disks = daisy_cmn.get_service_disk_list(
             req, {'role_id': role['id']})
         for service_disk in service_disks:
             if service_disk['disk_location'] == 'local' and\
@@ -278,32 +262,11 @@ def get_cluster_hosts_config(req, cluster_id):
         host_config = _get_network_plat(req, host_config_detail,
                                         networks,
                                         pxe_macs[0])
-        hosts_config.append(tecs_cmn.sort_interfaces_by_pci(networks,
-                                                            host_config))
+        hosts_config.append(daisy_cmn.sort_interfaces_by_pci(networks,
+                                                             host_config))
     return hosts_config
 
 
-def check_tfg_exist():
-    get_tfg_patch = "ls %s|grep CGSL_VPLAT-.*\.iso$" % daisy_tecs_path
-    obj = subprocess.Popen(get_tfg_patch,
-                           shell=True,
-                           stdout=subprocess.PIPE,
-                           stderr=subprocess.PIPE)
-    (stdoutput, erroutput) = obj.communicate()
-    tfg_patch_pkg_file = ""
-    tfg_patch_pkg_name = ""
-    if stdoutput:
-        tfg_patch_pkg_name = stdoutput.split('\n')[0]
-        tfg_patch_pkg_file = daisy_tecs_path + tfg_patch_pkg_name
-        chmod_for_tfg_bin = 'chmod +x %s' % tfg_patch_pkg_file
-        daisy_cmn.subprocess_call(chmod_for_tfg_bin)
-
-    if not stdoutput or not tfg_patch_pkg_name:
-        LOG.info(_("no CGSL_VPLAT iso file got in %s" % daisy_tecs_path))
-        return ""
-    return tfg_patch_pkg_file
-
-
 def update_db_host_status(req, host_id, host_status):
     """
     Update host status and installation progress to db.
@@ -339,7 +302,6 @@ class OSInstall():
         self.max_parallel_os_num = int(CONF.max_parallel_os_number)
         self.cluster_hosts_install_timeout = (
            self.max_parallel_os_num / 4 + 2) * 60 * (12 * self.time_step)
-        self.daisyclient = get_daisyclient()
 
     def _set_boot_or_power_state(self, user, passwd, addr, action):
         count = 0
@@ -449,25 +411,17 @@
             hugepagesize = '1G'
         # tfg_patch_pkg_file = check_tfg_exist()
-        if host_detail.get('hwm_id'):
-            host_hwm_meta = {
-                "hwm_ip": host_detail.get('hwm_ip'),
-                "hwm_id": host_detail.get('hwm_id'),
-                "boot_type": "pxe"
-            }
-            self.daisyclient.node.set_boot(**host_hwm_meta)
-        else:
-            if (not host_detail['ipmi_user'] or
-                    not host_detail['ipmi_passwd'] or
-                    not host_detail['ipmi_addr']):
-                self.message = "Invalid ipmi information configed for host %s"\
-                    % host_detail['id']
-                raise exception.NotFound(message=self.message)
+        if (not host_detail['ipmi_user'] or
+                not host_detail['ipmi_passwd'] or
+                not host_detail['ipmi_addr']):
+            self.message = "Invalid ipmi information configured for host %s" \
+                % host_detail['id']
+            raise exception.NotFound(message=self.message)
 
-            self._set_boot_or_power_state(host_detail['ipmi_user'],
-                                          host_detail['ipmi_passwd'],
-                                          host_detail['ipmi_addr'],
-                                          'pxe')
+        self._set_boot_or_power_state(host_detail['ipmi_user'],
+                                      host_detail['ipmi_passwd'],
+                                      host_detail['ipmi_addr'],
+                                      'pxe')
 
         kwargs = {'hostname': host_detail['name'],
                   'iso_path': os_version_file,
@@ -537,17 +491,10 @@
             msg = "install os return failed for host %s" % host_detail['id']
             raise exception.OSInstallFailed(message=msg)
 
-        if host_detail.get('hwm_id'):
-            host_hwm_meta = {
-                "hwm_ip": host_detail.get('hwm_ip'),
-                "hwm_id": host_detail.get('hwm_id')
-            }
-            self.daisyclient.node.restart(**host_hwm_meta)
-        else:
-            self._set_boot_or_power_state(host_detail['ipmi_user'],
-                                          host_detail['ipmi_passwd'],
-                                          host_detail['ipmi_addr'],
-                                          'reset')
+        self._set_boot_or_power_state(host_detail['ipmi_user'],
+                                      host_detail['ipmi_passwd'],
+                                      host_detail['ipmi_addr'],
+                                      'reset')
 
     def _begin_install_os(self, hosts_detail):
         # all hosts status is set to 'pre-install' before os installing
@@ -562,26 +509,15 @@ class OSInstall():
     def _set_disk_start_mode(self, host_detail):
         LOG.info(_("Set boot from disk for host %s" % (host_detail['id'])))
-        if host_detail.get('hwm_id'):
-            host_hwm_meta = {
-                "hwm_ip": host_detail.get('hwm_ip'),
-                "hwm_id": host_detail.get('hwm_id'),
-                "boot_type": "disk"
-            }
-            self.daisyclient.node.set_boot(**host_hwm_meta)
-            LOG.info(_("reboot host %s" % (host_detail['id'])))
-            host_hwm_meta.pop('boot_type')
-            self.daisyclient.node.restart(**host_hwm_meta)
-        else:
-            self._set_boot_or_power_state(host_detail['ipmi_user'],
-                                          host_detail['ipmi_passwd'],
-                                          host_detail['ipmi_addr'],
-                                          'disk')
-            LOG.info(_("reboot host %s" % (host_detail['id'])))
-            self._set_boot_or_power_state(host_detail['ipmi_user'],
-                                          host_detail['ipmi_passwd'],
-                                          host_detail['ipmi_addr'],
-                                          'reset')
+        self._set_boot_or_power_state(host_detail['ipmi_user'],
+                                      host_detail['ipmi_passwd'],
+                                      host_detail['ipmi_addr'],
+                                      'disk')
+        LOG.info(_("reboot host %s" % (host_detail['id'])))
+        self._set_boot_or_power_state(host_detail['ipmi_user'],
+                                      host_detail['ipmi_passwd'],
+                                      host_detail['ipmi_addr'],
+                                      'reset')
 
     def _init_progress(self, host_detail, hosts_status):
         host_id = host_detail['id']
@@ -742,190 +678,3 @@ class OSInstall():
         else:
             role_hosts_ids.remove(host_id)
         return (hosts_detail, role_hosts_ids)
-
-
-def _os_thread_bin(req, host_ip, host_id):
-    host_meta = {}
-    password = "ossdbg1"
-    LOG.info(_("Begin update os for host %s." % (host_ip)))
-    cmd = 'mkdir -p /var/log/daisy/daisy_update/'
-    daisy_cmn.subprocess_call(cmd)
-
-    var_log_path = "/var/log/daisy/daisy_update/%s_update_tfg.log" % host_ip
-    with open(var_log_path, "w+") as fp:
-        cmd = '/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, password)
-        daisy_cmn.subprocess_call(cmd, fp)
-        cmd = 'clush -S -w %s "mkdir -p /home/daisy_update/"' % (host_ip,)
-        daisy_cmn.subprocess_call(cmd, fp)
-        cmd = 'clush -S -b -w %s "rm -rf /home/daisy_update/*"' % (host_ip,)
-        daisy_cmn.subprocess_call(cmd, fp)
-        cmd = 'clush -S -w %s -c /var/lib/daisy/tecs/*CGSL_VPLAT*.iso\
-            /var/lib/daisy/tecs/tfg_upgrade.sh \
-            --dest=/home/daisy_update' % (
-            host_ip,)
-        daisy_cmn.subprocess_call(cmd, fp)
-        cmd = 'clush -S -w %s "chmod 777 /home/daisy_update/*"' % (host_ip,)
-        daisy_cmn.subprocess_call(cmd, fp)
-        host_meta['os_progress'] = 30
-        host_meta['os_status'] = host_os_status['UPDATING']
-        host_meta['messages'] = "os updating,copy iso successfully"
-        update_db_host_status(req, host_id, host_meta)
-        try:
-            exc_result = subprocess.check_output(
-                'clush -S -w %s "/home/daisy_update/tfg_upgrade.sh"' % (
-                    host_ip,),
-                shell=True, stderr=subprocess.STDOUT)
-        except subprocess.CalledProcessError as e:
-            if e.returncode == 255 and "reboot" in e.output.strip():
-                host_meta['os_progress'] = 100
-                host_meta['os_status'] = host_os_status['ACTIVE']
-                host_meta['messages'] = "upgrade tfg successfully,os reboot"
-                LOG.info(
-                    _("Update tfg for %s successfully,os reboot!" % host_ip))
-                daisy_cmn.check_reboot_ping(host_ip)
-            else:
-                host_meta['os_progress'] = 0
-                host_meta['os_status'] = host_os_status['UPDATE_FAILED']
-                host_meta[
-                    'messages'] =\
-                    e.output.strip()[-400:-200].replace('\n', ' ')
-                LOG.error(_("Update tfg for %s failed!" % host_ip))
-            update_db_host_status(req, host_id, host_meta)
-            fp.write(e.output.strip())
-        else:
-            host_meta['os_progress'] = 100
-            host_meta['os_status'] = host_os_status['ACTIVE']
-            host_meta['messages'] = "upgrade tfg successfully"
-            update_db_host_status(req, host_id, host_meta)
-            LOG.info(_("Update os for %s successfully!"
-                       % host_ip))
-            fp.write(exc_result)
-    if "reboot" in exc_result:
-        daisy_cmn.check_reboot_ping(host_ip)
-
-
-# this will be raise raise all the exceptions of the thread to log file
-def os_thread_bin(req, host_ip, host_id):
-    try:
-        _os_thread_bin(req, host_ip, host_id)
-    except Exception as e:
-        LOG.exception(e.message)
-        raise exception.ThreadBinException(message=e.message)
-
-
-def _get_host_os_version(host_ip, host_pwd='ossdbg1'):
-    version = ""
-    tfg_version_file = '/usr/sbin/tfg_showversion'
-    try:
-        subprocess.check_output("sshpass -p %s ssh -o StrictHostKeyChecking=no"
-                                " %s test -f %s" % (host_pwd, host_ip,
-                                                    tfg_version_file),
-                                shell=True, stderr=subprocess.STDOUT)
-    except subprocess.CalledProcessError:
-        LOG.info(_("Host %s os version is TFG" % host_ip))
-        return version
-    try:
-        process =\
-            subprocess.Popen(["sshpass", "-p", "%s" % host_pwd, "ssh",
-                              "-o StrictHostKeyChecking=no", "%s" % host_ip,
-                              'tfg_showversion'], shell=False,
-                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-        version = process.stdout.read().strip('\n')
-    except subprocess.CalledProcessError:
-        msg = _("Get host %s os version by subprocess failed!" % host_ip)
-        raise exception.SubprocessCmdFailed(message=msg)
-
-    if version:
-        LOG.info(_("Host %s os version is %s" % (host_ip, version)))
-        return version
-    else:
-        msg = _("Get host %s os version by tfg_showversion failed!" % host_ip)
-        LOG.error(msg)
-        raise exception.Invalid(message=msg)
-
-
-def _cmp_os_version(new_os_file, old_os_version,
-                    target_host_ip, password='ossdbg1'):
-    shell_file = '/usr/sbin/tfg_showversion'
-    if old_os_version:
-        try:
-            subprocess.check_output("test -f %s" % shell_file, shell=True,
-                                    stderr=subprocess.STDOUT)
-        except subprocess.CalledProcessError:
-            scripts = ["sshpass -p %s scp -r -o\
-                StrictHostKeyChecking=no %s:%s "
-                       "/usr/sbin/" % (password, target_host_ip, shell_file)]
-            tecs_cmn.run_scrip(scripts)
-
-        cmp_script = "tfg_showversion %s %s" % (new_os_file, old_os_version)
-        try:
-            result = subprocess.check_output(cmp_script, shell=True,
-                                             stderr=subprocess.STDOUT)
-        except subprocess.CalledProcessError:
-            return -1
-    else:
-        if new_os_file.find("Mimosa") != -1:
-            return 0
-        else:
-            msg = _("Please use Mimosa os to upgrade instead of TFG")
-            LOG.error(msg)
-            raise exception.Forbidden(message=msg)
-    return result.find("yes")
-
-
-def upgrade_os(req, hosts_list):
-    upgrade_hosts = []
-    max_parallel_os_upgrade_number = int(CONF.max_parallel_os_upgrade_number)
-    while hosts_list:
-        host_meta = {}
-        threads = []
-        if len(hosts_list) > max_parallel_os_upgrade_number:
-            upgrade_hosts = hosts_list[:max_parallel_os_upgrade_number]
-            hosts_list = hosts_list[max_parallel_os_upgrade_number:]
-        else:
-            upgrade_hosts = hosts_list
-            hosts_list = []
-
-        new_os_file = check_tfg_exist()
-        for host_info in upgrade_hosts:
-            host_id = host_info.keys()[0]
-            host_ip = host_info.values()[0]
-            host_detail = daisy_cmn.get_host_detail(req, host_id)
-            target_host_os = _get_host_os_version(
-                host_ip, host_detail['root_pwd'])
-
-            if _cmp_os_version(new_os_file, target_host_os, host_ip) != -1:
-                host_meta['os_progress'] = 10
-                host_meta['os_status'] = host_os_status['UPDATING']
-                host_meta['messages'] = "os updating,begin copy iso"
-                update_db_host_status(req, host_id, host_meta)
-                t = threading.Thread(target=os_thread_bin, args=(req, host_ip,
-                                                                 host_id))
-                t.setDaemon(True)
-                t.start()
-                threads.append(t)
-            else:
-                LOG.warn(_("new os version is lower than or equal to that of "
-                           "host %s, don't need to upgrade!"
-                           % host_ip))
-        try:
-            for t in threads:
-                t.join()
-        except:
-            LOG.warn(_("Join update thread %s failed!" % t))
-        else:
-            for host_info in upgrade_hosts:
-                update_failed_flag = False
-                host_id = host_info.keys()[0]
-                host_ip = host_info.values()[0]
-                host = registry.get_host_metadata(req.context, host_id)
-                if host['os_status'] == host_os_status['UPDATE_FAILED'] or\
-                        host['os_status'] == host_os_status['INIT']:
-                    update_failed_flag = True
-                    raise exception.ThreadBinException(
-                        "%s update tfg failed! %s" % (
-                            host_ip, host['messages']))
-                if not update_failed_flag:
-                    host_meta = {}
-                    host_meta['os_progress'] = 100
-                    host_meta['os_status'] = host_os_status['ACTIVE']
-                    host_meta['messages'] = "upgrade tfg successfully"
-                    update_db_host_status(req, host_id, host_meta)
diff --git a/code/daisy/daisy/api/backends/proton/__init__.py b/code/daisy/daisy/api/backends/proton/__init__.py
deleted file mode 100755
index e69de29b..00000000
diff --git a/code/daisy/daisy/api/backends/proton/api.py b/code/daisy/daisy/api/backends/proton/api.py
deleted file mode 100755
index 677afcf2..00000000
--- a/code/daisy/daisy/api/backends/proton/api.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-/install endpoint for proton API
-"""
-from oslo_log import log as logging
-
-import threading
-
-from daisy import i18n
-
-from daisy.common import exception
-from daisy.api.backends import driver
-import daisy.api.backends.common as daisy_cmn
-import daisy.api.backends.proton.common as proton_cmn
-import daisy.api.backends.proton.install as instl
-import daisy.api.backends.proton.uninstall as unstl
-
-
-LOG = logging.getLogger(__name__)
-_ = i18n._
-_LE = i18n._LE
-_LI = i18n._LI
-_LW = i18n._LW
-
-proton_state = proton_cmn.PROTON_STATE
-
-
-class API(driver.DeploymentDriver):
-    """
-    The hosts API is a RESTful web service for host data. The API
-    is as follows::
-
-        GET  /hosts -- Returns a set of brief metadata about hosts
-        GET  /hosts/detail -- Returns a set of detailed metadata about
-                              hosts
-        HEAD /hosts/<ID> -- Return metadata about an host with id <ID>
-        GET  /hosts/<ID> -- Return host data for host with id <ID>
-        POST /hosts -- Store host data and return metadata about the
-                       newly-stored host
-        PUT  /hosts/<ID> -- Update host metadata and/or upload host
-                            data for a previously-reserved host
-        DELETE /hosts/<ID> -- Delete the host with id <ID>
-    """
-    def __init__(self):
-        super(API, self).__init__()
-        return
-
-    def install(self, req, cluster_id):
-        """
-        Install PROTON to a cluster.
-
-        cluster_id:cluster id
-        """
-        proton_install_task = instl.ProtonInstallTask(req, cluster_id)
-        proton_install_task.start()
-
-    def _uninstall(self, req, role_id, threads):
-        try:
-            for t in threads:
-                t.setDaemon(True)
-                t.start()
-            LOG.info(_("uninstall threads have started,"
-                       " please waiting...."))
-
-            for t in threads:
-                t.join()
-        except:
-            LOG.warn(_("Join uninstall thread failed!"))
-        else:
-            uninstall_failed_flag = False
-            role = daisy_cmn.get_role_detail(req, role_id)
-            if role['progress'] == 100:
-                unstl.update_progress_to_db(
-                    req, role_id, proton_state['UNINSTALL_FAILED'])
-                uninstall_failed_flag = True
-                return
-            if role['status'] == proton_state['UNINSTALL_FAILED']:
-                uninstall_failed_flag = True
-                return
-            if not uninstall_failed_flag:
-                LOG.info(_("all uninstall threads have done,"
-                           " set role of proton status to 'init'!"))
-                unstl.update_progress_to_db(req, role_id,
-                                            proton_state['INIT'])
-
-    def uninstall(self, req, cluster_id):
-        """
-        Uninstall PROTON to a cluster.
-        :raises HTTPBadRequest if x-install-cluster is missing
-        """
-        (role_id, hosts_list) = proton_cmn.get_roles_and_hosts_list(req,
-                                                                    cluster_id)
-        if role_id:
-            if not hosts_list:
-                msg = _("there is no host in cluster %s") % cluster_id
-                raise exception.ThreadBinException(msg)
-
-            unstl.update_progress_to_db(req, role_id,
-                                        proton_state['UNINSTALLING'], 0.0)
-            uninstall_progress_percentage = \
-                round(1 * 1.0 / len(hosts_list), 2) * 100
-
-            threads = []
-            for host in hosts_list:
-                host_detail = proton_cmn.get_host_detail(req, host['host_id'])
-                t = threading.Thread(target=unstl.thread_bin,
-                                     args=(req,
-                                           host_detail['interfaces'][0]['ip'],
-                                           role_id,
-                                           uninstall_progress_percentage))
-                threads.append(t)
-
-            self._uninstall(req, role_id, threads)
diff --git a/code/daisy/daisy/api/backends/proton/common.py b/code/daisy/daisy/api/backends/proton/common.py
deleted file mode 100755
index 552119ed..00000000
--- a/code/daisy/daisy/api/backends/proton/common.py
+++ /dev/null
@@ -1,178 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
- -""" -/install endpoint for proton API -""" -import subprocess -from oslo_log import log as logging -from webob.exc import HTTPBadRequest - -from daisy import i18n - -from daisy.common import exception -import daisy.registry.client.v1.api as registry -import daisy.api.backends.common as daisy_cmn - - -LOG = logging.getLogger(__name__) -_ = i18n._ -_LE = i18n._LE -_LI = i18n._LI -_LW = i18n._LW - -daisy_proton_path = '/var/lib/daisy/proton/' -PROTON_STATE = { - 'INIT': 'init', - 'INSTALLING': 'installing', - 'ACTIVE': 'active', - 'INSTALL_FAILED': 'install-failed', - 'UNINSTALLING': 'uninstalling', - 'UNINSTALL_FAILED': 'uninstall-failed', - 'UPDATING': 'updating', - 'UPDATE_FAILED': 'update-failed', -} - - -def get_host_detail(req, host_id): - try: - host_detail = registry.get_host_metadata(req.context, host_id) - except exception.Invalid as e: - raise HTTPBadRequest(explanation=e.msg, request=req) - return host_detail - - -def get_roles_detail(req): - try: - roles = registry.get_roles_detail(req.context) - except exception.Invalid as e: - raise HTTPBadRequest(explanation=e.msg, request=req) - return roles - - -def get_hosts_of_role(req, role_id): - try: - hosts = registry.get_role_host_metadata(req.context, role_id) - except exception.Invalid as e: - raise HTTPBadRequest(explanation=e.msg, request=req) - return hosts - - -def get_roles_and_hosts_list(req, cluster_id): - roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id) - for role in roles: - if role['deployment_backend'] == daisy_cmn.proton_backend_name: - role_hosts = get_hosts_of_role(req, role['id']) - return (role['id'], role_hosts) - - -def get_role_detail(req, role_id): - try: - role = registry.get_role_metadata(req.context, role_id) - except exception.Invalid as e: - raise HTTPBadRequest(explanation=e.msg, request=req) - return role - - -def check_and_get_proton_version(daisy_proton_path): - proton_version_pkg_name = "" - get_proton_version_pkg = "ls %s| grep ^ZXDTC-PROTON.*\.bin$" \ - % daisy_proton_path - obj = subprocess.Popen( - get_proton_version_pkg, shell=True, stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - (stdoutput, erroutput) = obj.communicate() - if stdoutput: - proton_version_pkg_name = stdoutput.split('\n')[0] - proton_version_pkg_file = daisy_proton_path + proton_version_pkg_name - chmod_for_proton_version = 'chmod +x %s' % proton_version_pkg_file - daisy_cmn.subprocess_call(chmod_for_proton_version) - return proton_version_pkg_name - - -class ProtonShellExector(): - """ - Install proton bin. 
- """ - def __init__(self, mgt_ip, proton_version_name, task_type, rmc_ip=''): - self.task_type = task_type - self.mgt_ip = mgt_ip - self.proton_version_file = daisy_proton_path + proton_version_name - self.rmc_ip = rmc_ip - self.clush_cmd = "" - self.oper_type = { - 'install': self._install_proton, - 'uninstall': self._uninstall_proton - } - self.oper_shell = { - 'CMD_SSHPASS_PRE': "sshpass -p ossdbg1 %(ssh_ip)s %(cmd)s", - 'CMD_BIN_SCP': - "scp %(path)s root@%(ssh_ip)s:/home" % - {'path': self.proton_version_file, 'ssh_ip': mgt_ip}, - 'CMD_BIN_INSTALL': "sudo /home/%s install %s 7777" % - (proton_version_name, self.rmc_ip), - 'CMD_BIN_UNINSTALL': "sudo /home/%s uninstall" % - proton_version_name, - 'CMD_BIN_REMOVE': "sudo rm -rf /home/%s" % proton_version_name - } - - self._execute() - - def _install_proton(self): - self.clush_cmd = \ - "%s;%s" % ( - self.oper_shell['CMD_SSHPASS_PRE'] % - {"ssh_ip": "", "cmd": self.oper_shell['CMD_BIN_SCP']}, - self.oper_shell['CMD_SSHPASS_PRE'] % - { - "ssh_ip": "ssh " + self.mgt_ip, "cmd": - self.oper_shell['CMD_BIN_INSTALL'] - } - ) - - subprocess.check_output(self.clush_cmd, shell=True, - stderr=subprocess.STDOUT) - - def _uninstall_proton(self): - self.clush_cmd = \ - "%s;%s" % ( - self.oper_shell['CMD_SSHPASS_PRE'] % - {"ssh_ip": "", "cmd": self.oper_shell['CMD_BIN_SCP']}, - self.oper_shell['CMD_SSHPASS_PRE'] % - { - "ssh_ip": "ssh " + self.mgt_ip, - "cmd": self.oper_shell['CMD_BIN_UNINSTALL'] - } - ) - - subprocess.check_output(self.clush_cmd, shell=True, - stderr=subprocess.STDOUT) - - def _execute(self): - try: - if not self.task_type or not self.mgt_ip: - LOG.error(_("<<>>")) - return - - self.oper_type[self.task_type]() - except subprocess.CalledProcessError as e: - LOG.warn(_("<<>>" % e.output.strip())) - except Exception as e: - LOG.exception(_(e.message)) - else: - LOG.info(_("<<>>" % self.clush_cmd)) diff --git a/code/daisy/daisy/api/backends/proton/install.py b/code/daisy/daisy/api/backends/proton/install.py deleted file mode 100755 index 5b6d2372..00000000 --- a/code/daisy/daisy/api/backends/proton/install.py +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -/install endpoint for proton API -""" -from oslo_log import log as logging -from threading import Thread - -from daisy import i18n -import daisy.api.v1 - -from daisy.common import exception -import daisy.api.backends.common as daisy_cmn -import daisy.api.backends.proton.common as proton_cmn - - -LOG = logging.getLogger(__name__) -_ = i18n._ -_LE = i18n._LE -_LI = i18n._LI -_LW = i18n._LW -SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS -SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS -ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE - - -proton_state = proton_cmn.PROTON_STATE -daisy_proton_path = proton_cmn.daisy_proton_path - - -def get_proton_ip(req, role_hosts): - proton_ip_list = [] - for role_host in role_hosts: - host_detail = proton_cmn.get_host_detail(req, - role_host['host_id']) - for interface in host_detail['interfaces']: - for network in interface['assigned_networks']: - if network.get("name") == "MANAGEMENT": - proton_ip_list.append(network.get("ip")) - - return proton_ip_list - - -def get_proton_hosts(req, cluster_id): - all_roles = proton_cmn.get_roles_detail(req) - for role in all_roles: - if role['cluster_id'] == cluster_id and role['name'] == 'PROTON': - role_hosts = proton_cmn.get_hosts_of_role(req, role['id']) - - return get_proton_ip(req, role_hosts) - - -def get_rmc_host(req, cluster_id): - return "10.43.211.63" - - -class ProtonInstallTask(Thread): - """ - Class for install proton bin. - """ - def __init__(self, req, cluster_id): - super(ProtonInstallTask, self).__init__() - self.req = req - self.cluster_id = cluster_id - self.progress = 0 - self.message = "" - self.state = proton_state['INIT'] - self.proton_ip_list = [] - self.install_log_fp = None - self.last_line_num = 0 - self.need_install = False - self.ping_times = 36 - - def _update_install_progress_to_db(self): - """ - Update progress of intallation to db. - :return: - """ - roles = daisy_cmn.get_cluster_roles_detail(self.req, self.cluster_id) - for role in roles: - if role['deployment_backend'] != daisy_cmn.proton_backend_name: - continue - role_hosts = daisy_cmn.get_hosts_of_role(self.req, role['id']) - for role_host in role_hosts: - if role_host['status'] != proton_state['ACTIVE']: - self.need_install = True - role_host['status'] = self.state - daisy_cmn.update_role_host(self.req, role_host['id'], - role_host) - role['status'] = self.state - role['messages'] = self.message - daisy_cmn.update_role(self.req, role['id'], role) - - def run(self): - try: - self._run() - except (exception.InstallException, - exception.NotFound, - exception.InstallTimeoutException) as e: - LOG.exception(e.message) - else: - self.progress = 100 - self.state = proton_state['ACTIVE'] - self.message = "Proton install successfully" - LOG.info(_("Install PROTON for cluster %s successfully." % - self.cluster_id)) - finally: - self._update_install_progress_to_db() - - def _run(self): - """ - Exectue install file(.bin) with sync mode. 
-        :return:
-        """
-        if not self.cluster_id or not self.req:
-            raise exception.InstallException(
-                cluster_id=self.cluster_id, reason="invalid params.")
-
-        self.proton_ip_list = get_proton_hosts(self.req, self.cluster_id)
-        unreached_hosts = daisy_cmn.check_ping_hosts(self.proton_ip_list,
-                                                     self.ping_times)
-        if unreached_hosts:
-            self.state = proton_state['INSTALL_FAILED']
-            self.message = "hosts %s ping failed" % unreached_hosts
-            raise exception.NotFound(message=self.message)
-
-        proton_version_name = \
-            proton_cmn.check_and_get_proton_version(daisy_proton_path)
-        if not proton_version_name:
-            self.state = proton_state['INSTALL_FAILED']
-            self.message = "PROTON version file not found in %s" % \
-                daisy_proton_path
-            raise exception.NotFound(message=self.message)
-
-        rmc_ip = get_rmc_host(self.req, self.cluster_id)
-
-        for proton_ip in self.proton_ip_list:
-            proton_cmn.ProtonShellExector(proton_ip, proton_version_name,
-                                          'install', rmc_ip)
diff --git a/code/daisy/daisy/api/backends/proton/uninstall.py b/code/daisy/daisy/api/backends/proton/uninstall.py
deleted file mode 100755
index e8847fec..00000000
--- a/code/daisy/daisy/api/backends/proton/uninstall.py
+++ /dev/null
@@ -1,103 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-/hosts endpoint for Daisy v1 API
-"""
-import subprocess
-
-from oslo_log import log as logging
-import threading
-from daisy import i18n
-import daisy.api.backends.common as daisy_cmn
-import daisy.api.backends.proton.common as proton_cmn
-
-LOG = logging.getLogger(__name__)
-_ = i18n._
-_LE = i18n._LE
-_LI = i18n._LI
-_LW = i18n._LW
-
-proton_state = proton_cmn.PROTON_STATE
-daisy_proton_path = proton_cmn.daisy_proton_path
-
-# uninstall init progress is 100, when uninstall succefully,
-# uninstall progress is 0, and web display progress is reverted
-uninstall_proton_progress = 100.0
-uninstall_mutex = threading.Lock()
-
-
-def update_progress_to_db(req, role_id, status, progress_percentage_step=0.0):
-    """
-    Write uninstall progress and status to db, we use global lock object
-    'uninstall_mutex' to make sure this function is thread safety.
-    :param req: http req.
-    :param role_id_list: Column neeb be update in role table.
-    :param status: Uninstall status.
-    :return:
-    """
-    global uninstall_mutex
-    global uninstall_proton_progress
-    uninstall_mutex.acquire(True)
-    uninstall_proton_progress -= progress_percentage_step
-    role = {}
-
-    role_hosts = daisy_cmn.get_hosts_of_role(req, role_id)
-    if status == proton_state['UNINSTALLING']:
-        role['status'] = status
-        role['progress'] = uninstall_proton_progress
-        role['messages'] = 'Proton uninstalling'
-        for role_host in role_hosts:
-            role_host_meta = dict()
-            role_host_meta['status'] = status
-            role_host_meta['progress'] = uninstall_proton_progress
-            daisy_cmn.update_role_host(req, role_host['id'], role_host_meta)
-    if status == proton_state['UNINSTALL_FAILED']:
-        role['status'] = status
-        role['messages'] = 'Uninstall-failed'
-        for role_host in role_hosts:
-            role_host_meta = dict()
-            role_host_meta['status'] = status
-            daisy_cmn.update_role_host(req, role_host['id'], role_host_meta)
-    elif status == proton_state['INIT']:
-        role['status'] = status
-        role['progress'] = 0
-        role['messages'] = 'Proton uninstall successfully'
-        daisy_cmn.delete_role_hosts(req, role_id)
-
-    daisy_cmn.update_role(req, role_id, role)
-    uninstall_mutex.release()
-
-
-def _thread_bin(req, host_ip, role_id, uninstall_progress_percentage):
-    try:
-        proton_version_name = \
-            proton_cmn.check_and_get_proton_version(daisy_proton_path)
-        proton_cmn.ProtonShellExector(host_ip, proton_version_name,
-                                      'uninstall')
-    except subprocess.CalledProcessError:
-        update_progress_to_db(req, role_id, proton_state['UNINSTALL_FAILED'])
-        LOG.info(_("Uninstall PROTON for %s failed!" % host_ip))
-    else:
-        update_progress_to_db(req, role_id, proton_state['UNINSTALLING'],
-                              uninstall_progress_percentage)
-        LOG.info(_("Uninstall PROTON for %s successfully!" % host_ip))
-
-
-def thread_bin(req, host_ip, role_id, uninstall_progress_percentage):
-    try:
-        _thread_bin(req, host_ip, role_id, uninstall_progress_percentage)
-    except Exception as e:
-        LOG.exception(e.message)
diff --git a/code/daisy/daisy/api/backends/tecs/__init__.py b/code/daisy/daisy/api/backends/tecs/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/code/daisy/daisy/api/backends/tecs/api.py b/code/daisy/daisy/api/backends/tecs/api.py
deleted file mode 100755
index 23e846b9..00000000
--- a/code/daisy/daisy/api/backends/tecs/api.py
+++ /dev/null
@@ -1,427 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
- -""" -/install endpoint for tecs API -""" -import subprocess -import commands - -from oslo_config import cfg -from oslo_log import log as logging -from webob.exc import HTTPBadRequest - -import threading - -from daisy import i18n - -from daisy.common import exception -from daisy.api.backends.tecs import config -from daisy.api.backends import driver -import daisy.api.backends.os as os_handle -import daisy.api.backends.common as daisy_cmn -import daisy.api.backends.tecs.common as tecs_cmn -import daisy.api.backends.tecs.install as instl -import daisy.api.backends.tecs.uninstall as unstl -import daisy.api.backends.tecs.upgrade as upgrd -import daisy.api.backends.tecs.disk_array as disk_array -from daisy.api.backends.tecs import write_configs -import daisy.registry.client.v1.api as registry - - -LOG = logging.getLogger(__name__) -_ = i18n._ -_LE = i18n._LE -_LI = i18n._LI -_LW = i18n._LW -CONF = cfg.CONF -upgrade_opts = [ - cfg.StrOpt('max_parallel_os_upgrade_number', default=10, - help='Maximum number of hosts upgrade os at the same time.'), -] -CONF.register_opts(upgrade_opts) - -tecs_state = tecs_cmn.TECS_STATE -daisy_tecs_path = tecs_cmn.daisy_tecs_path - - -class API(driver.DeploymentDriver): - - """ - The hosts API is a RESTful web service for host data. The API - is as follows:: - - GET /hosts -- Returns a set of brief metadata about hosts - GET /hosts/detail -- Returns a set of detailed metadata about - hosts - HEAD /hosts/ -- Return metadata about an host with id - GET /hosts/ -- Return host data for host with id - POST /hosts -- Store host data and return metadata about the - newly-stored host - PUT /hosts/ -- Update host metadata and/or upload host - data for a previously-reserved host - DELETE /hosts/ -- Delete the host with id - """ - - def __init__(self): - super(API, self).__init__() - return - - def install(self, req, cluster_id): - """ - Install TECS to a cluster. 
-
-        param req: The WSGI/Webob Request object
-        cluster_id:cluster id
-        """
-        write_configs.update_configset(req, cluster_id)
-
-        tecs_install_task = instl.TECSInstallTask(req, cluster_id)
-        tecs_install_task.start()
-
-    def _get_roles_and_hosts_ip_list(self, req, cluster_id):
-        role_host_ips = {'ha': set(), 'lb': set(), 'all': set()}
-        role_id_list = set()
-        hosts_id_list = []
-        hosts_list = []
-        tecs_install_failed_list = set()
-
-        roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id)
-        cluster_networks = daisy_cmn.get_cluster_networks_detail(
-            req, cluster_id)
-        for role in roles:
-            if role['deployment_backend'] != daisy_cmn.tecs_backend_name:
-                continue
-            role_hosts = daisy_cmn.get_hosts_of_role(req, role['id'])
-            if role_hosts:
-                for role_host in role_hosts:
-                    host = daisy_cmn.get_host_detail(req, role_host['host_id'])
-                    host_ip = tecs_cmn.get_host_network_ip(
-                        req, host, cluster_networks, 'MANAGEMENT')
-                    if role['name'] == "CONTROLLER_HA":
-                        role_host_ips['ha'].add(host_ip)
-                    if role['name'] == "CONTROLLER_LB":
-                        role_host_ips['lb'].add(host_ip)
-                    role_host_ips['all'].add(host_ip)
-                    hosts_id_list.append({host['id']: host_ip})
-                    if role_host['status'] == tecs_state['INSTALL_FAILED']:
-                        tecs_install_failed_list.add(host_ip)
-                role_id_list.add(role['id'])
-        for host in hosts_id_list:
-            if host not in hosts_list:
-                hosts_list.append(host)
-        return (role_id_list, role_host_ips,
-                hosts_list, tecs_install_failed_list)
-
-    def _query_progress(self, req, cluster_id, action=""):
-        nodes_list = []
-        roles = daisy_cmn.get_roles_detail(req)
-        (role_id_list, role_host_ips, hosts_list, tecs_install_failed_list) =\
-            self._get_roles_and_hosts_ip_list(req, cluster_id)
-        for host in hosts_list:
-            node = {}
-            host_id = host.keys()[0]
-            host = daisy_cmn.get_host_detail(req, host_id)
-            node['id'] = host['id']
-            node['name'] = host['name']
-
-            if 0 == cmp("upgrade", action):
-                node['os-progress'] = host['os_progress']
-                node['os-status'] = host['os_status']
-                node['os-messages'] = host['messages']
-
-            if host['status'] == "with-role":
-                host_roles = [role for role in roles if role['name'] in host[
-                    'role'] and role['cluster_id'] == cluster_id]
-                if host_roles:
-                    node['role-status'] = host_roles[0]['status']
-                    node['role-progress'] = str(host_roles[0]['progress'])
-                    # node['role-message'] = host_roles[0]['messages']
-            nodes_list.append(node)
-        if nodes_list:
-            return {'tecs_nodes': nodes_list}
-        else:
-            return {'tecs_nodes': "TECS uninstall successfully,\
-                    the host has been removed from the host_roles table"}
-
-    def _modify_running_version_of_configs(self, req,
-                                           running_version, cluster_id):
-        cluster_configs_list = daisy_cmn.get_cluster_configs_list(req,
-                                                                  cluster_id)
-        if cluster_configs_list:
-            for cluster_config in cluster_configs_list:
-                registry.update_config_metadata(req.context,
-                                                cluster_config['id'],
-                                                {'running_version':
-                                                 running_version})
-
-    def uninstall(self, req, cluster_id):
-        """
-        Uninstall TECS to a cluster.
-
-        :param req: The WSGI/Webob Request object
-
-        :raises HTTPBadRequest if x-install-cluster is missing
-        """
-        (role_id_list, role_host_ips, hosts_list, tecs_install_failed_list) =\
-            self._get_roles_and_hosts_ip_list(req, cluster_id)
-        if role_id_list:
-            if not role_host_ips['all']:
-                msg = _("there is no host in cluster %s") % cluster_id
-                raise exception.ThreadBinException(msg)
-
-            unstl.update_progress_to_db(
-                req, role_id_list, tecs_state['UNINSTALLING'], hosts_list)
-
-            threads = []
-            for host_ip in role_host_ips['all']:
-                t = threading.Thread(
-                    target=unstl.thread_bin, args=(req, host_ip, role_id_list,
-                                                   hosts_list))
-                t.setDaemon(True)
-                t.start()
-                threads.append(t)
-            LOG.info(_("Uninstall threads have started, please waiting...."))
-
-            try:
-                for t in threads:
-                    t.join()
-            except:
-                LOG.warn(_("Join uninstall thread %s failed!" % t))
-            else:
-                uninstall_failed_flag = False
-                for role_id in role_id_list:
-                    role_hosts = daisy_cmn.get_hosts_of_role(req, role_id)
-                    for role_host in role_hosts:
-                        if role_host['status'] ==\
-                                tecs_state['UNINSTALL_FAILED']:
-                            unstl.update_progress_to_db(
-                                req, role_id_list, tecs_state[
-                                    'UNINSTALL_FAILED'], hosts_list)
-                            uninstall_failed_flag = True
-                            break
-                if not uninstall_failed_flag:
-                    LOG.info(
-                        _("All uninstall threads have done,\
-                           set all roles status to 'init'!"))
-                    unstl.update_progress_to_db(
-                        req, role_id_list, tecs_state['INIT'], hosts_list)
-                    LOG.info(_("modify the running_version of configs to 0"))
-                    running_version = 0
-                    self._modify_running_version_of_configs(
-                        req, running_version, cluster_id)
-            tecs_cmn.inform_provider_cloud_state(req.context, cluster_id,
-                                                 operation='delete')
-        try:
-            (status, output) = commands.getstatusoutput('rpm -e --nodeps openstack-packstack\
-                openstack-packstack-puppet \
-                openstack-puppet-modules puppet')
-        except exception.Invalid as e:
-            raise HTTPBadRequest(explanation=e.msg, request=req)
-
-    def uninstall_progress(self, req, cluster_id):
-        return self._query_progress(req, cluster_id, "uninstall")
-
-    def upgrade(self, req, cluster_id):
-        """
-        update TECS to a cluster.
-
-        :param req: The WSGI/Webob Request object
-
-        :raises HTTPBadRequest if x-install-cluster is missing
-        """
-        # daisy_update_path = '/home/daisy_update/'
-
-        (role_id_list, role_host_ips, hosts_list, tecs_install_failed_list) =\
-            self._get_roles_and_hosts_ip_list(req, cluster_id)
-        if role_id_list:
-            if not role_host_ips['all']:
-                msg = _("there is no host in cluster %s") % cluster_id
-                raise exception.ThreadBinException(msg)
-            unreached_hosts = daisy_cmn.check_ping_hosts(
-                role_host_ips['all'], 1)
-            if unreached_hosts:
-                self.message = "hosts %s ping failed" % unreached_hosts
-                raise exception.NotFound(message=self.message)
-            daisy_cmn.subprocess_call('rm -rf /root/.ssh/known_hosts')
-            if os_handle.check_tfg_exist():
-                os_handle.upgrade_os(req, hosts_list)
-                unreached_hosts = daisy_cmn.check_ping_hosts(
-                    role_host_ips['all'], 30)
-                if unreached_hosts:
-                    self.message = "hosts %s ping failed after tfg upgrade" \
-                        % unreached_hosts
-                    raise exception.NotFound(message=self.message)
-            # check and get TECS version
-            tecs_version_pkg_file = tecs_cmn.check_and_get_tecs_version(
-                tecs_cmn.daisy_tecs_path)
-            if not tecs_version_pkg_file:
-                self.state = tecs_state['UPDATE_FAILED']
-                self.message = "TECS version file not found in %s"\
-                    % tecs_cmn.daisy_tecs_path
-                raise exception.NotFound(message=self.message)
-            threads = []
-            LOG.info(
-                _("Begin to update TECS controller nodes, please waiting...."))
-            upgrd.update_progress_to_db(
-                req, role_id_list, tecs_state['UPDATING'], hosts_list)
-            for host_ip in role_host_ips['ha']:
-                if host_ip in tecs_install_failed_list:
-                    continue
-                LOG.info(_("Update TECS controller node %s..." % host_ip))
-                rc = upgrd.thread_bin(req, role_id_list, host_ip, hosts_list)
-                if rc == 0:
-                    LOG.info(_("Update TECS for %s successfully" % host_ip))
-                else:
-                    LOG.info(
-                        _("Update TECS failed for %s, return %s"
-                          % (host_ip, rc)))
-                    return
-
-            LOG.info(_("Begin to update TECS other nodes, please waiting...."))
-            max_parallel_upgrade_number = int(
-                CONF.max_parallel_os_upgrade_number)
-            compute_ip_list = role_host_ips[
-                'all'] - role_host_ips['ha'] - tecs_install_failed_list
-            while compute_ip_list:
-                threads = []
-                if len(compute_ip_list) > max_parallel_upgrade_number:
-                    upgrade_hosts = compute_ip_list[
-                        :max_parallel_upgrade_number]
-                    compute_ip_list = compute_ip_list[
-                        max_parallel_upgrade_number:]
-                else:
-                    upgrade_hosts = compute_ip_list
-                    compute_ip_list = []
-                for host_ip in upgrade_hosts:
-                    t = threading.Thread(
-                        target=upgrd.thread_bin,
-                        args=(req, role_id_list, host_ip, hosts_list))
-                    t.setDaemon(True)
-                    t.start()
-                    threads.append(t)
-                try:
-                    for t in threads:
-                        t.join()
-                except:
-                    LOG.warn(_("Join update thread %s failed!" % t))
-
-            for role_id in role_id_list:
-                role_hosts = daisy_cmn.get_hosts_of_role(req, role_id)
-                for role_host in role_hosts:
-                    if (role_host['status'] == tecs_state['UPDATE_FAILED'] or
-                            role_host['status'] == tecs_state['UPDATING']):
-                        role_id = [role_host['role_id']]
-                        upgrd.update_progress_to_db(req,
-                                                    role_id,
-                                                    tecs_state[
-                                                        'UPDATE_FAILED'],
-                                                    hosts_list)
-                        break
-                    elif role_host['status'] == tecs_state['ACTIVE']:
-                        role_id = [role_host['role_id']]
-                        upgrd.update_progress_to_db(req,
-                                                    role_id,
-                                                    tecs_state['ACTIVE'],
-                                                    hosts_list)
-
-    def upgrade_progress(self, req, cluster_id):
-        return self._query_progress(req, cluster_id, "upgrade")
-
-    def export_db(self, req, cluster_id):
-        """
-        Export daisy db data to tecs.conf and HA.conf.
-
-        :param req: The WSGI/Webob Request object
-
-        :raises HTTPBadRequest if x-install-cluster is missing
-        """
-
-        tecs_config =\
-            instl.get_cluster_tecs_config(req, cluster_id)
-
-        config_files = {'tecs_conf': '', 'ha_conf': ''}
-        tecs_install_path = "/home/tecs_install"
-        if tecs_config:
-            cluster_conf_path = tecs_install_path + "/" + cluster_id
-            create_cluster_conf_path =\
-                "rm -rf %s;mkdir %s" % (cluster_conf_path, cluster_conf_path)
-            daisy_cmn.subprocess_call(create_cluster_conf_path)
-            config.update_tecs_config(tecs_config, cluster_conf_path)
-
-            get_tecs_conf = "ls %s|grep tecs.conf" % cluster_conf_path
-            obj = subprocess.Popen(get_tecs_conf,
-                                   shell=True,
-                                   stdout=subprocess.PIPE,
-                                   stderr=subprocess.PIPE)
-            (stdoutput, erroutput) = obj.communicate()
-            tecs_conf_file = ""
-            if stdoutput:
-                tecs_conf_file = stdoutput.split('\n')[0]
-                config_files['tecs_conf'] =\
-                    cluster_conf_path + "/" + tecs_conf_file
-
-            get_ha_conf_cmd = "ls %s|grep HA_1.conf" % cluster_conf_path
-            obj = subprocess.Popen(get_ha_conf_cmd,
-                                   shell=True,
-                                   stdout=subprocess.PIPE,
-                                   stderr=subprocess.PIPE)
-            (stdoutput, erroutput) = obj.communicate()
-            ha_conf_file = ""
-            if stdoutput:
-                ha_conf_file = stdoutput.split('\n')[0]
-                config_files['ha_conf'] =\
-                    cluster_conf_path + "/" + ha_conf_file
-        else:
-            LOG.info(_("No TECS config files generated."))
-
-        return config_files
-
-    def update_disk_array(self, req, cluster_id):
-        (share_disk_info, volume_disk_info) =\
-            disk_array.get_disk_array_info(req, cluster_id)
-        array_nodes_addr =\
-            tecs_cmn.get_disk_array_nodes_addr(req, cluster_id)
-
-        ha_nodes_ip = array_nodes_addr['ha'].keys()
-        all_nodes_ip = list(array_nodes_addr['computer']) + ha_nodes_ip
-
-        if all_nodes_ip:
-            compute_error_msg =\
-                disk_array.config_compute_multipath(all_nodes_ip)
-            if compute_error_msg:
-                return compute_error_msg
-            else:
-                LOG.info(_("Config Disk Array multipath successfully"))
-
-        if share_disk_info:
-            ha_error_msg =\
-                disk_array.config_ha_share_disk(share_disk_info,
-                                                array_nodes_addr['ha'])
-            if ha_error_msg:
-                return ha_error_msg
-            else:
-                LOG.info(_("Config Disk Array for HA nodes successfully"))
-
-        if volume_disk_info:
-            cinder_error_msg =\
-                disk_array.config_ha_cinder_volume(volume_disk_info,
-                                                   ha_nodes_ip)
-            if cinder_error_msg:
-                return cinder_error_msg
-            else:
-                LOG.info(_("Config cinder volume for HA nodes successfully"))
-
-        return 'update successfully'
diff --git a/code/daisy/daisy/api/backends/tecs/common.py b/code/daisy/daisy/api/backends/tecs/common.py
deleted file mode 100755
index 0c36872d..00000000
--- a/code/daisy/daisy/api/backends/tecs/common.py
+++ /dev/null
@@ -1,496 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
- -""" -/install endpoint for tecs API -""" -import os -import copy -import subprocess -import re -from oslo_log import log as logging -from webob.exc import HTTPBadRequest -from webob.exc import HTTPForbidden -from daisy import i18n -from daisy.common import utils - -from daisy.common import exception -import daisy.registry.client.v1.api as registry -import daisy.api.backends.common as daisy_cmn -from daisyclient.v1 import client as daisy_client -import ConfigParser - - -STR_MASK = '*' * 8 -LOG = logging.getLogger(__name__) -_ = i18n._ -_LE = i18n._LE -_LI = i18n._LI -_LW = i18n._LW - -daisy_tecs_path = '/var/lib/daisy/tecs/' -tecs_install_path = '/home/tecs_install' - -TECS_STATE = { - 'INIT': 'init', - 'INSTALLING': 'installing', - 'ACTIVE': 'active', - 'INSTALL_FAILED': 'install-failed', - 'UNINSTALLING': 'uninstalling', - 'UNINSTALL_FAILED': 'uninstall-failed', - 'UPDATING': 'updating', - 'UPDATE_FAILED': 'update-failed', -} - - -def get_daisyclient(): - """Get Daisy client instance.""" - config_daisy = ConfigParser.ConfigParser() - config_daisy.read("/etc/daisy/daisy-api.conf") - daisy_port = config_daisy.get("DEFAULT", "bind_port") - args = {'version': 1.0, 'endpoint': 'http://127.0.0.1:' + daisy_port} - return daisy_client.Client(**args) - - -def mkdir_tecs_install(host_ips=None): - if not host_ips: - cmd = "mkdir -p %s" % tecs_install_path - daisy_cmn.subprocess_call(cmd) - return - for host_ip in host_ips: - cmd = 'clush -S -w %s "mkdir -p %s"' % (host_ip, tecs_install_path) - daisy_cmn.subprocess_call(cmd) - - -def _get_cluster_network(cluster_networks, network_name): - network = [cn for cn in cluster_networks if cn['name'] == network_name] - if not network or not network[0]: - msg = "network %s is not exist" % (network_name) - raise exception.InvalidNetworkConfig(msg) - else: - return network[0] - - -def get_host_interface_by_network(host_detail, network_name): - host_detail_info = copy.deepcopy(host_detail) - interface_list = [hi for hi in host_detail_info['interfaces'] - for assigned_network in hi['assigned_networks'] - if assigned_network and - network_name == assigned_network['name']] - interface = {} - if interface_list: - interface = interface_list[0] - - if not interface and 'MANAGEMENT' == network_name: - msg = "network %s of host %s is not exist" % ( - network_name, host_detail_info['id']) - raise exception.InvalidNetworkConfig(msg) - - return interface - - -def get_host_network_ip(req, host_detail, cluster_networks, network_name): - interface_network_ip = '' - host_interface = get_host_interface_by_network(host_detail, network_name) - if host_interface: - network = _get_cluster_network(cluster_networks, network_name) - assigned_network = daisy_cmn.get_assigned_network(req, - host_interface['id'], - network['id']) - interface_network_ip = assigned_network['ip'] - - if not interface_network_ip and 'MANAGEMENT' == network_name: - msg = "%s network ip of host %s can't be empty" % ( - network_name, host_detail['id']) - raise exception.InvalidNetworkConfig(msg) - return interface_network_ip - - -def get_storage_name_ip_dict(req, cluster_id, network_type): - name_ip_list = [] - ip_list = [] - roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id) - cluster_networks = daisy_cmn.get_cluster_networks_detail(req, cluster_id) - - networks_list = [network for network in cluster_networks - if network['network_type'] == network_type] - networks_name_list = [network['name'] for network in networks_list] - - for role in roles: - role_hosts = daisy_cmn.get_hosts_of_role(req, 
-        for role_host in role_hosts:
-            host_detail = daisy_cmn.get_host_detail(req, role_host['host_id'])
-            for network_name in networks_name_list:
-                ip = get_host_network_ip(req, host_detail, cluster_networks,
-                                         network_name)
-
-                name_ip_dict = {}
-                if ip and ip not in ip_list:
-                    ip_list.append(ip)
-                    name_ip_dict.update({host_detail['name'] + '.' +
-                                         network_name: ip})
-                    name_ip_list.append(name_ip_dict)
-
-    return name_ip_list
-
-
-def get_network_netmask(cluster_networks, network_name):
-    network = _get_cluster_network(cluster_networks, network_name)
-    cidr = network['cidr']
-    if not cidr:
-        msg = "cidr of network %s does not exist" % (network_name)
-        raise exception.InvalidNetworkConfig(msg)
-
-    netmask = daisy_cmn.cidr_to_netmask(cidr)
-    if not netmask:
-        msg = "netmask of network %s does not exist" % (network_name)
-        raise exception.InvalidNetworkConfig(msg)
-    return netmask
-
-
-# every host only has one gateway
-def get_network_gateway(cluster_networks, network_name):
-    network = _get_cluster_network(cluster_networks, network_name)
-    gateway = network['gateway']
-    return gateway
-
-
-def get_network_cidr(cluster_networks, network_name):
-    network = _get_cluster_network(cluster_networks, network_name)
-    cidr = network['cidr']
-    if not cidr:
-        msg = "cidr of network %s does not exist" % (network_name)
-        raise exception.InvalidNetworkConfig(msg)
-    return cidr
-
-
-def get_mngt_network_vlan_id(cluster_networks):
-    mgnt_vlan_id = ""
-    management_network = [network for network in cluster_networks if network[
-        'network_type'] == 'MANAGEMENT']
-    if (not management_network or
-            not management_network[0] or
-            # not management_network[0].has_key('vlan_id')):
-            'vlan_id' not in management_network[0]):
-        msg = "can't get management network vlan id"
-        raise exception.InvalidNetworkConfig(msg)
-    else:
-        mgnt_vlan_id = management_network[0]['vlan_id']
-    return mgnt_vlan_id
-
-
-def get_network_vlan_id(cluster_networks, network_type):
-    vlan_id = ""
-    general_network = [network for network in cluster_networks
-                       if network['network_type'] == network_type]
-    if (not general_network or not general_network[0] or
-            # not general_network[0].has_key('vlan_id')):
-            'vlan_id' not in general_network[0]):
-        msg = "can't get %s network vlan id" % network_type
-        raise exception.InvalidNetworkConfig(msg)
-    else:
-        vlan_id = general_network[0]['vlan_id']
-    return vlan_id
-
-
-def sort_interfaces_by_pci(networks, host_detail):
-    """
-    Sort interfaces by pci segment. If the interface type is bond,
-    use the pci of the first member nic. This function fixes a bug with
-    the name length of ovs virtual ports: if the name is longer than
-    15 characters, the port creation fails.
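-    e.g. (hypothetical names) an interface 'enp129s0f10' (11 chars) on a
-    network with vlan_id '1024' (4 chars) gives
-    redundant_bit = 11 + 4 - 14 = 1, so the leading character is dropped
-    and the resulting port name 'np129s0f10.1024' fits in 15 characters.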
-    :param networks: network info of the cluster
-    :param host_detail: host info, including the interfaces of the host
-    :return:
-    """
-    interfaces = eval(host_detail.get('interfaces', None)) \
-        if isinstance(host_detail, unicode) else \
-        host_detail.get('interfaces', None)
-    if not interfaces:
-        LOG.info("This host has no interfaces info.")
-        return host_detail
-
-    tmp_interfaces = copy.deepcopy(interfaces)
-
-    slaves_name_list = []
-    for interface in tmp_interfaces:
-        if interface.get('type', None) == "bond" and\
-                interface.get('slave1', None) and\
-                interface.get('slave2', None):
-            slaves_name_list.append(interface['slave1'])
-            slaves_name_list.append(interface['slave2'])
-
-    for interface in interfaces:
-        if interface.get('name') not in slaves_name_list:
-            vlan_id_len_list = [len(network['vlan_id'])
-                                for assigned_network in interface.get(
-                                    'assigned_networks', [])
-                                for network in networks
-                                if assigned_network.get('name') ==
-                                network.get('name') and network.get('vlan_id')]
-            max_vlan_id_len = max(vlan_id_len_list) if vlan_id_len_list else 0
-            interface_name_len = len(interface['name'])
-            redundant_bit = interface_name_len + max_vlan_id_len - 14
-            interface['name'] = interface['name'][
-                redundant_bit:] if redundant_bit > 0 else interface['name']
-    return host_detail
-
-
-def check_and_get_tecs_version(daisy_tecs_pkg_path):
-    tecs_version_pkg_file = ""
-    get_tecs_version_pkg = "ls %s| grep ^ZXTECS.*\.bin$" % daisy_tecs_pkg_path
-    obj = subprocess.Popen(get_tecs_version_pkg,
-                           shell=True,
-                           stdout=subprocess.PIPE,
-                           stderr=subprocess.PIPE)
-    (stdoutput, erroutput) = obj.communicate()
-    if stdoutput:
-        tecs_version_pkg_name = stdoutput.split('\n')[0]
-        tecs_version_pkg_file = daisy_tecs_pkg_path + tecs_version_pkg_name
-        chmod_for_tecs_version = 'chmod +x %s' % tecs_version_pkg_file
-        daisy_cmn.subprocess_call(chmod_for_tecs_version)
-    return tecs_version_pkg_file
-
-
-def get_service_disk_list(req, params):
-    try:
-        service_disks = registry.list_service_disk_metadata(
-            req.context, **params)
-    except exception.Invalid as e:
-        raise HTTPBadRequest(explanation=e.msg, request=req)
-    return service_disks
-
-
-def get_cinder_volume_list(req, params):
-    try:
-        cinder_volumes = registry.list_cinder_volume_metadata(
-            req.context, **params)
-    except exception.Invalid as e:
-        raise HTTPBadRequest(explanation=e.msg, request=req)
-    return cinder_volumes
-
-
-def mask_string(unmasked, mask_list=None, replace_list=None):
-    """
-    Replaces words from mask_list with MASK in the unmasked string.
-    If words need to be transformed before masking, the transformation
-    can be described in the replace list. For example [("'","'\\''")]
-    replaces all ' characters with '\\''.
-    """
-    mask_list = mask_list or []
-    replace_list = replace_list or []
-
-    masked = unmasked
-    for word in sorted(mask_list, lambda x, y: len(y) - len(x)):
-        if not word:
-            continue
-        for before, after in replace_list:
-            word = word.replace(before, after)
-        masked = masked.replace(word, STR_MASK)
-    return masked
-
-
-def run_scrip(script, ip=None, password=None, msg=None):
-    try:
-        _run_scrip(script, ip, password)
-    except:
-        msg1 = 'Error occurred during running scripts.'
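-        # append the caller-supplied context (msg), when given, to the
-        # generic error text before logging and raising HTTPForbidden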
-        message = msg1 + msg if msg else msg1
-        LOG.error(message)
-        raise HTTPForbidden(explanation=message)
-    else:
-        LOG.info('Running scripts successfully!')
-
-
-def _run_scrip(script, ip=None, password=None):
-    mask_list = []
-    repl_list = [("'", "'\\''")]
-    script = "\n".join(script)
-    _PIPE = subprocess.PIPE
-    if ip:
-        cmd = ["sshpass", "-p", "%s" % password,
-               "ssh", "-o StrictHostKeyChecking=no",
-               "%s" % ip, "bash -x"]
-    else:
-        cmd = ["bash", "-x"]
-    environ = os.environ
-    environ['LANG'] = 'en_US.UTF8'
-    obj = subprocess.Popen(cmd, stdin=_PIPE, stdout=_PIPE, stderr=_PIPE,
-                           close_fds=True, shell=False, env=environ)
-
-    script = "function t(){ exit $? ; } \n trap t ERR \n" + script
-    out, err = obj.communicate(script)
-    masked_out = mask_string(out, mask_list, repl_list)
-    masked_err = mask_string(err, mask_list, repl_list)
-    if obj.returncode:
-        pattern = (r'^ssh\:')
-        if re.search(pattern, err):
-            LOG.error(_("Network error occurred when running the script."))
-            raise exception.NetworkError(masked_err, stdout=out, stderr=err)
-        else:
-            msg = ('Failed to run remote script, stdout: %s\nstderr: %s' %
                   (masked_out, masked_err))
-            LOG.error(msg)
-            raise exception.ScriptRuntimeError(msg, stdout=out, stderr=err)
-    return obj.returncode, out
-
-
-def inform_provider_cloud_state(context, cluster_id, **kwargs):
-    params = dict()
-    daisyclient = get_daisyclient()
-    cluster = registry.get_cluster_metadata(context, cluster_id)
-    params['operation'] = kwargs.get('operation')
-    params['name'] = cluster.get('name')
-    params['url'] = "http://" + cluster.get('public_vip')
-    params['provider_ip'] = cluster.get('hwm_ip')
-    daisyclient.node.cloud_state(**params)
-
-
-def get_disk_array_nodes_addr(req, cluster_id):
-    controller_ha_nodes = {}
-    computer_ips = set()
-
-    roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id)
-    cluster_networks =\
-        daisy_cmn.get_cluster_networks_detail(req, cluster_id)
-    for role in roles:
-        if role['deployment_backend'] != daisy_cmn.tecs_backend_name:
-            continue
-        role_hosts = daisy_cmn.get_hosts_of_role(req, role['id'])
-        for role_host in role_hosts:
-            # hosts that already have tecs installed are excluded
-            if (role_host['status'] == TECS_STATE['ACTIVE'] or
-                    role_host['status'] == TECS_STATE['UPDATING'] or
-                    role_host['status'] == TECS_STATE['UPDATE_FAILED']):
-                continue
-            host_detail = daisy_cmn.get_host_detail(req,
-                                                    role_host['host_id'])
-            host_ip = get_host_network_ip(req,
-                                          host_detail,
-                                          cluster_networks,
-                                          'MANAGEMENT')
-            if role['name'] == "CONTROLLER_HA":
-                min_mac = utils.get_host_min_mac(host_detail['interfaces'])
-                controller_ha_nodes[host_ip] = min_mac
-            if role['name'] == "COMPUTER":
-                computer_ips.add(host_ip)
-    return {'ha': controller_ha_nodes, 'computer': computer_ips}
-
-
-def get_ctl_ha_nodes_min_mac(req, cluster_id):
-    '''
-    ctl_ha_nodes_min_mac = {'host_name1':'min_mac1', ...}
-    '''
-    ctl_ha_nodes_min_mac = {}
-    roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id)
-    cluster_networks =\
-        daisy_cmn.get_cluster_networks_detail(req, cluster_id)
-    for role in roles:
-        if role['deployment_backend'] != daisy_cmn.tecs_backend_name:
-            continue
-        role_hosts = daisy_cmn.get_hosts_of_role(req, role['id'])
-        for role_host in role_hosts:
-            # hosts that already have tecs installed are excluded
-            if (role_host['status'] == TECS_STATE['ACTIVE'] or
-                    role_host['status'] == TECS_STATE['UPDATING'] or
-                    role_host['status'] == TECS_STATE['UPDATE_FAILED']):
-                continue
-            host_detail = daisy_cmn.get_host_detail(req,
-                                                    role_host['host_id'])
-            host_name = host_detail['name']
-            if role['name'] == "CONTROLLER_HA":
"CONTROLLER_HA": - min_mac = utils.get_host_min_mac(host_detail['interfaces']) - ctl_ha_nodes_min_mac[host_name] = min_mac - return ctl_ha_nodes_min_mac - - -class TecsShellExector(object): - - """ - Class config task before install tecs bin. - """ - - def __init__(self, mgnt_ip, task_type, params={}): - self.task_type = task_type - self.mgnt_ip = mgnt_ip - self.params = params - self.clush_cmd = "" - self.rpm_name =\ - daisy_cmn.get_rpm_package_by_name(daisy_tecs_path, - 'network-configuration') - self.NETCFG_RPM_PATH = daisy_tecs_path + self.rpm_name - self.oper_type = { - 'install_rpm': self._install_netcfg_rpm, - 'uninstall_rpm': self._uninstall_netcfg_rpm, - 'update_rpm': self._update_netcfg_rpm, - } - self.oper_shell = { - 'CMD_SSHPASS_PRE': "sshpass -p ossdbg1 %(ssh_ip)s %(cmd)s", - 'CMD_RPM_UNINSTALL': "rpm -e network-configuration", - 'CMD_RPM_INSTALL': "rpm -i /home/%(rpm)s" % {'rpm': self.rpm_name}, - 'CMD_RPM_UPDATE': "rpm -U /home/%(rpm)s" % {'rpm': self.rpm_name}, - 'CMD_RPM_SCP': "scp -o StrictHostKeyChecking=no \ - %(path)s root@%(ssh_ip)s:/home" % - {'path': self.NETCFG_RPM_PATH, 'ssh_ip': mgnt_ip} - } - LOG.info(_("<<>>" % self.rpm_name)) - self._execute() - - def _uninstall_netcfg_rpm(self): - self.clush_cmd = self.oper_shell['CMD_SSHPASS_PRE'] % \ - {"ssh_ip": "ssh -o StrictHostKeyChecking=no " + self.mgnt_ip, - "cmd": self.oper_shell['CMD_RPM_UNINSTALL']} - subprocess.check_output( - self.clush_cmd, shell=True, stderr=subprocess.STDOUT) - - def _update_netcfg_rpm(self): - self.clush_cmd = self.oper_shell['CMD_SSHPASS_PRE'] % \ - {"ssh_ip": "ssh -o StrictHostKeyChecking=no " + self.mgnt_ip, - "cmd": self.oper_shell['CMD_RPM_UPDATE']} - subprocess.check_output( - self.clush_cmd, shell=True, stderr=subprocess.STDOUT) - - def _install_netcfg_rpm(self): - if not os.path.exists(self.NETCFG_RPM_PATH): - LOG.error(_("<<>>" % self.NETCFG_RPM_PATH)) - return - - self.clush_cmd = "%s;%s" % \ - (self.oper_shell['CMD_SSHPASS_PRE'] % - {"ssh_ip": "", "cmd": self.oper_shell['CMD_RPM_SCP']}, - self.oper_shell['CMD_SSHPASS_PRE'] % - {"ssh_ip": "ssh -o StrictHostKeyChecking=no " + - self.mgnt_ip, "cmd": self.oper_shell['CMD_RPM_INSTALL']}) - subprocess.check_output( - self.clush_cmd, shell=True, stderr=subprocess.STDOUT) - - def _execute(self): - try: - if not self.task_type or not self.mgnt_ip: - LOG.error( - _("<<>>" % self.mgnt_ip, )) - return - - self.oper_type[self.task_type]() - except subprocess.CalledProcessError as e: - LOG.warn(_("<<>>" % ( - self.mgnt_ip, e.output.strip()))) - except Exception as e: - LOG.exception(_(e.message)) - else: - LOG.info(_("<<>>" % ( - self.clush_cmd, self.mgnt_ip))) diff --git a/code/daisy/daisy/api/backends/tecs/config.py b/code/daisy/daisy/api/backends/tecs/config.py deleted file mode 100755 index 1869b41d..00000000 --- a/code/daisy/daisy/api/backends/tecs/config.py +++ /dev/null @@ -1,976 +0,0 @@ -# -*- coding: utf-8 -*- -import os -import re -import commands -import types -import subprocess -import socket -import netaddr -from oslo_log import log as logging -from ConfigParser import ConfigParser -from daisy.common import exception -from daisy import i18n - -LOG = logging.getLogger(__name__) -_ = i18n._ -_LE = i18n._LE -_LI = i18n._LI -_LW = i18n._LW - -service_map = { - 'lb': 'haproxy', - 'mongodb': 'mongod', - 'ha': '', - 'mariadb': 'mariadb', - 'amqp': 'rabbitmq-server', - 'ceilometer-api': 'openstack-ceilometer-api', - 'ceilometer-collector': 'openstack-ceilometer-collector,\ - openstack-ceilometer-mend', - 'ceilometer-central': 
-    'ceilometer-notification': 'openstack-ceilometer-notification',
-    'ceilometer-alarm': 'openstack-ceilometer-alarm-evaluator,\
-    openstack-ceilometer-alarm-notifier',
-    'heat-api': 'openstack-heat-api',
-    'heat-api-cfn': 'openstack-heat-api-cfn',
-    'heat-engine': 'openstack-heat-engine',
-    'ironic': 'openstack-ironic-api,openstack-ironic-conductor',
-    'horizon': 'httpd,opencos-alarmmanager',
-    'keystone': 'openstack-keystone',
-    'glance': 'openstack-glance-api,openstack-glance-registry',
-    'cinder-volume': 'openstack-cinder-volume',
-    'cinder-scheduler': 'openstack-cinder-scheduler',
-    'cinder-api': 'openstack-cinder-api',
-    'neutron-metadata': 'neutron-metadata-agent',
-    'neutron-lbaas': 'neutron-lbaas-agent',
-    'neutron-dhcp': 'neutron-dhcp-agent',
-    'neutron-server': 'neutron-server',
-    'neutron-l3': 'neutron-l3-agent',
-    'compute': 'openstack-nova-compute',
-    'nova-cert': 'openstack-nova-cert',
-    'nova-sched': 'openstack-nova-scheduler',
-    'nova-vncproxy': 'openstack-nova-novncproxy,openstack-nova-consoleauth',
-    'nova-conductor': 'openstack-nova-conductor',
-    'nova-api': 'openstack-nova-api',
-    'nova-cells': 'openstack-nova-cells',
-    'camellia-api': 'camellia-api'
-}
-
-
-def add_service_with_host(services, name, host):
-    if name not in services:
-        services[name] = []
-    services[name].append(host)
-
-
-def add_service_with_hosts(services, name, hosts):
-    if name not in services:
-        services[name] = []
-    for h in hosts:
-        services[name].append(h['management']['ip'])
-
-
-def test_ping(ping_src_nic, ping_desc_ips):
-    ping_cmd = 'fping'
-    for ip in set(ping_desc_ips):
-        ping_cmd = ping_cmd + ' -I ' + ping_src_nic + ' ' + ip
-    obj = subprocess.Popen(
-        ping_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    (stdoutput, erroutput) = obj.communicate()
-    _returncode = obj.returncode
-    if _returncode == 0 or _returncode == 1:
-        ping_result = stdoutput.split('\n')
-        if "No such device" in erroutput:
-            return []
-        reachable_hosts = [result.split(
-        )[0] for result in ping_result if result and
-            result.split()[2] == 'alive']
-    else:
-        msg = "ping failed because there is an invalid ip in %s" % \
-            ping_desc_ips
-        raise exception.InvalidIP(msg)
-    return reachable_hosts
-
-
-def get_local_deployment_ip(tecs_deployment_ips):
-    (status, output) = commands.getstatusoutput('ifconfig')
-    netcard_pattern = re.compile('\S*: ')
-    ip_str = '([0-9]{1,3}\.){3}[0-9]{1,3}'
-    # ip_pattern = re.compile('(inet %s)' % ip_str)
-    pattern = re.compile(ip_str)
-    nic_ip = {}
-    for netcard in re.finditer(netcard_pattern, str(output)):
-        nic_name = netcard.group().split(': ')[0]
-        if nic_name == "lo":
-            continue
-        ifconfig_nic_cmd = "ifconfig %s" % nic_name
-        (status, output) = commands.getstatusoutput(ifconfig_nic_cmd)
-        if status:
-            continue
-        ip = pattern.search(str(output))
-        if ip and ip.group() != "127.0.0.1":
-            nic_ip[nic_name] = ip.group()
-
-    deployment_ip = ''
-    for nic in nic_ip.keys():
-        if nic_ip[nic] in tecs_deployment_ips:
-            deployment_ip = nic_ip[nic]
-            break
-    if not deployment_ip:
-        for nic, ip in nic_ip.items():
-            if test_ping(nic, tecs_deployment_ips):
-                deployment_ip = nic_ip[nic]
-                break
-    return deployment_ip
-
-
-class AnalsyConfig(object):
-
-    def __init__(self, all_configs):
-        self.all_configs = all_configs
-
-        self.services = {}
-        self.components = []
-        self.modes = {}
-        # self.ha_conf = {}
-        self.services_in_component = {}
-        # self.heartbeat = {}
-        self.lb_components = []
-        self.heartbeats = [[], [], []]
-        self.lb_vip = ''
-        self.ha_vip = ''
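-        # note: the db/glance/public vips below fall back to the HA vip
-        # when they are not configured separately (see prepare_ha_lb)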
self.db_vip = '' - self.glance_vip = '' - self.public_vip = '' - self.share_disk_services = [] - self.share_cluster_disk_services = [] - self.ha_conf = {} - self.child_cell_dict = {} - self.ha_master_host = {} - - def get_heartbeats(self, host_interfaces): - for network in host_interfaces: - self.heartbeats[0].append(network["management"]["ip"]) - # if network.has_key("heartbeat1") and network["heartbeat1"]["ip"]: - if "heartbeat1" in network and network["heartbeat1"]["ip"]: - self.heartbeats[1].append(network["heartbeat1"]["ip"]) - - # if network.has_key("heartbeat2") and network["heartbeat2"]["ip"]: - if "heartbeat2" in network and network["heartbeat2"]["ip"]: - self.heartbeats[2].append(network["heartbeat2"]["ip"]) - - # if network.has_key("storage") and network["storage"]["ip"]: - if "storage" in network and network["storage"]["ip"]: - # if not network.has_key("heartbeat1"): - if "heartbeat1" not in network: - self.heartbeats[1].append(network["storage"]["ip"]) - # if network.has_key("heartbeat1") and not \ - # network.has_key("heartbeat2"): - if "heartbeat1" in network and \ - "heartbeat2" not in network: - self.heartbeats[2].append(network["storage"]["ip"]) - - # delete empty heartbeat line - if not self.heartbeats[0]: - self.heartbeats[0] = self.heartbeats[1] - self.heartbeats[1] = self.heartbeats[2] - if not self.heartbeats[1]: - self.heartbeats[1] = self.heartbeats[2] - - # remove repeated ip - if set(self.heartbeats[1]) == set(self.heartbeats[0]): - self.heartbeats[1] = [] - if set(self.heartbeats[2]) != set(self.heartbeats[0]): - self.heartbeats[1] = self.heartbeats[2] - self.heartbeats[2] = [] - if set(self.heartbeats[2]) == set(self.heartbeats[0]) or \ - set(self.heartbeats[2]) == set(self.heartbeats[1]): - self.heartbeats[2] = [] - - def prepare_child_cell(self, child_cell_name, configs): - cell_compute_hosts = str() - cell_compute_name = child_cell_name[11:] + '_COMPUTER' - for role_name, role_configs in self.all_configs.items(): - if role_name == cell_compute_name: - cell_compute_host = [ - host_interface['management']['ip'] - for host_interface in role_configs['host_interfaces']] - cell_compute_hosts = ",".join(cell_compute_host) - self.all_configs.pop(role_name) - - child_cell_host = configs['host_interfaces'][0]['management']['ip'] - self.child_cell_dict[repr(child_cell_host).strip("u'")] \ - = repr(cell_compute_hosts).strip("u'") - - def prepare_ha_lb(self, role_configs, is_ha, is_lb): - if is_lb: - self.ha_master_host['ip'] = role_configs[ - 'host_interfaces'][0]['management']['ip'] - self.ha_master_host['hostname'] = role_configs[ - 'host_interfaces'][0]['name'] - self.components.append('CONFIG_LB_INSTALL') - add_service_with_hosts(self.services, - 'CONFIG_LB_BACKEND_HOSTS', - role_configs['host_interfaces']) - self.lb_vip = role_configs['vip'] - if is_ha: - # convert dns to ip - manage_ips = [] - for host_interface in role_configs['host_interfaces']: - manage_ip = '' - management_addr =\ - host_interface['management']['ip'] - try: - ip_lists = socket.gethostbyname_ex(management_addr) - manage_ip = ip_lists[2][0] - except Exception: - if netaddr.IPAddress(management_addr).version == 6: - manage_ip = management_addr - else: - raise exception.InvalidNetworkConfig( - "manage ip is not valid %s" % management_addr) - finally: - manage_ips.append(manage_ip) - - self.ha_vip = role_configs['vip'] - self.share_disk_services += role_configs['share_disk_services'] - self.share_cluster_disk_services += \ - role_configs['share_cluster_disk_services'] - local_deployment_ip = 
get_local_deployment_ip(manage_ips) - filename = r'/etc/zte-docker' - if local_deployment_ip: - if os.path.exists(filename): - add_service_with_host( - self.services, 'CONFIG_REPO', - 'http://' + local_deployment_ip + - ':18080' + '/tecs_install/') - else: - add_service_with_host( - self.services, 'CONFIG_REPO', - 'http://' + local_deployment_ip + '/tecs_install/') - else: - msg = "can't find ip for yum repo" - raise exception.InvalidNetworkConfig(msg) - self.components.append('CONFIG_HA_INSTALL') - add_service_with_host( - self.services, 'CONFIG_HA_HOST', - role_configs['host_interfaces'][0]['management']['ip']) - add_service_with_hosts(self.services, 'CONFIG_HA_HOSTS', - role_configs['host_interfaces']) - ntp_host = role_configs['ntp_server'] \ - if role_configs['ntp_server'] else role_configs['vip'] - add_service_with_host(self.services, 'CONFIG_NTP_SERVERS', - ntp_host) - - if role_configs['db_vip']: - self.db_vip = role_configs['db_vip'] - add_service_with_host( - self.services, 'CONFIG_MARIADB_HOST', - role_configs['db_vip']) - else: - self.db_vip = role_configs['vip'] - add_service_with_host( - self.services, 'CONFIG_MARIADB_HOST', role_configs['vip']) - - if role_configs['glance_vip']: - self.glance_vip = role_configs['glance_vip'] - add_service_with_host( - self.services, 'CONFIG_GLANCE_HOST', - role_configs['glance_vip']) - else: - self.glance_vip = role_configs['vip'] - add_service_with_host( - self.services, 'CONFIG_GLANCE_HOST', role_configs['vip']) - - if role_configs['public_vip']: - self.public_vip = role_configs['public_vip'] - else: - self.public_vip = role_configs['vip'] - - add_service_with_host(self.services, - 'CONFIG_NOVA_VNCPROXY_HOST', - self.public_vip) - add_service_with_host(self.services, 'CONFIG_PUBLIC_IP', - self.public_vip) - add_service_with_host(self.services, 'CONFIG_HORIZON_HOST', - self.public_vip) - ''' - add_service_with_host(self.services, 'CONFIG_ADMIN_IP', - role_configs['vip']) - add_service_with_host(self.services, 'CONFIG_INTERNAL_IP', - role_configs['vip']) - ''' - - def prepare_role_service(self, is_ha, service, role_configs): - host_key_name = "CONFIG_%s_HOST" % service - hosts_key_name = "CONFIG_%s_HOSTS" % service - - add_service_with_hosts(self.services, hosts_key_name, - role_configs['host_interfaces']) - if service != 'LB' and service not in ['NOVA_VNCPROXY', 'MARIADB', - 'GLANCE', 'HORIZON']: - add_service_with_host(self.services, host_key_name, - role_configs['vip']) - - if is_ha and service == 'LB': - add_service_with_hosts( - self.services, 'CONFIG_LB_FRONTEND_HOSTS', - role_configs['host_interfaces']) - - def prepare_mode(self, is_ha, is_lb, service): - mode_key = "CONFIG_%s_INSTALL_MODE" % service - if is_ha: - self.modes.update({mode_key: 'HA'}) - elif is_lb: - self.modes.update({mode_key: 'LB'}) - # special process - if service == 'GLANCE': - self.modes.update( - {'CONFIG_GLANCE_API_INSTALL_MODE': 'LB'}) - self.modes.update( - {'CONFIG_GLANCE_REGISTRY_INSTALL_MODE': 'LB'}) - # if s == 'HEAT': - # self.modes.update({'CONFIG_HEAT_API_INSTALL_MODE': 'LB'}) - # self.modes.update({'CONFIG_HEAT_API_CFN_INSTALL_MODE': 'LB'}) - # if s == 'CEILOMETER': - # self.modes.update({ - # 'CONFIG_CEILOMETER_API_INSTALL_MODE': 'LB'}) - if service == 'IRONIC': - self.modes.update( - {'CONFIG_IRONIC_API_INSTALL_MODE': 'LB'}) - else: - self.modes.update({mode_key: 'None'}) - - def prepare_services_in_component(self, component, service, role_configs): - if component not in self.services_in_component.keys(): - self.services_in_component[component] = {} 
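-            # each component entry collects the systemctl service names
-            # (looked up via service_map) plus the fip/nic_name/netmask
-            # that the HA configuration needs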
- self.services_in_component[component]["service"] = [] - self.services_in_component[component][ - "service"].append(service_map[service]) - - if component == "horizon": - self.services_in_component[component]["fip"] = self.public_vip - elif component == "database": - self.services_in_component[component]["fip"] = self.db_vip - elif component == "glance": - self.services_in_component[component]["fip"] = self.glance_vip - else: - self.services_in_component[component]["fip"] = role_configs["vip"] - - network_name = '' - if component in ['horizon'] and\ - 'publicapi' in role_configs["host_interfaces"][0]: - network_name = 'publicapi' - else: - network_name = 'management' - - self.services_in_component[component]["netmask"] = \ - role_configs["host_interfaces"][0][network_name]["netmask"] - self.services_in_component[component]["nic_name"] = \ - role_configs["host_interfaces"][0][network_name]["name"] - if component == 'loadbalance' and \ - 'CONTROLLER_LB' in self.all_configs and \ - self.all_configs['CONTROLLER_LB']['vip']: - self.services_in_component[component]["fip"] = \ - self.all_configs['CONTROLLER_LB']['vip'] - - def prepare_amqp_mariadb(self): - if self.lb_vip: - amqp_vip = '' - if self.modes['CONFIG_AMQP_INSTALL_MODE'] == 'LB': - amqp_vip = self.lb_vip - add_service_with_host( - self.services, - 'CONFIG_AMQP_CLUSTER_MASTER_NODE_IP', - self.ha_master_host['ip']) - add_service_with_host( - self.services, 'CONFIG_AMQP_CLUSTER_MASTER_NODE_HOSTNAME', - self.ha_master_host['hostname']) - else: - amqp_vip = self.ha_vip - amqp_dict = "{'%s':'%s,%s,%s,%s'}" % (amqp_vip, self.ha_vip, - self.lb_vip, self.glance_vip, - self.public_vip) - mariadb_dict = "{'%s':'%s,%s,%s,%s'}" % (self.db_vip, self.ha_vip, - self.lb_vip, - self.glance_vip, - self.public_vip) - add_service_with_host(self.services, 'CONFIG_LB_HOST', self.lb_vip) - elif self.ha_vip: - amqp_dict = "{'%s':'%s,%s,%s'}" % (self.ha_vip, self.ha_vip, - self.glance_vip, - self.public_vip) - mariadb_dict = "{'%s':'%s,%s,%s'}" % (self.db_vip, self.ha_vip, - self.glance_vip, - self.public_vip) - else: - amqp_dict = "{}" - mariadb_dict = "{}" - if self.lb_vip or self.ha_vip: - add_service_with_host(self.services, 'CONFIG_MARIADB_DICT', - mariadb_dict) - add_service_with_host(self.services, 'CONFIG_AMQP_DICT', amqp_dict) - - def prepare(self): - for role_name, role_configs in self.all_configs.items(): - if role_name == "OTHER": - continue - - is_ha = re.match(".*_HA$", role_name) is not None - is_lb = re.match(".*_LB$", role_name) is not None - is_child_cell = re.match(".*_CHILD_CELL.*", role_name) is not None - if is_child_cell: - self.prepare_child_cell(role_name, role_configs) - continue - self.prepare_ha_lb(role_configs, is_ha, is_lb) - - for service, component in role_configs['services'].items(): - s = service.strip().upper().replace('-', '_') - self.prepare_role_service(is_ha, s, role_configs) - self.prepare_mode(is_ha, is_lb, s) - - if is_lb: - self.lb_components.append(component) - c = "CONFIG_%s_INSTALL" % \ - component.strip().upper().replace('-', '_') - self.components.append(c) - - if is_ha: - if component == 'log': - continue - self.prepare_services_in_component(component, service, - role_configs) - if is_ha: - self.get_heartbeats(role_configs['host_interfaces']) - - self.prepare_amqp_mariadb() - - if self.child_cell_dict: - add_service_with_host(self.services, 'CONFIG_CHILD_CELL_DICT', - str(self.child_cell_dict)) - - def update_conf_with_services(self, tecs): - for s in self.services: - if tecs.has_option("general", s): - # if 
type(self.services[s]) is types.ListType:
-            if isinstance(self.services[s], types.ListType):
-                if self.services[s] and not self.services[s][0]:
-                    return
-                tecs.set("general", s, ','.join(self.services[s]))
-            else:
-                msg = "service %s does not exist in conf file" % s
-                LOG.info(msg)
-
-    def update_conf_with_components(self, tecs):
-        for s in self.components:
-            if tecs.has_option("general", s):
-                tecs.set("general", s, 'y')
-            else:
-                msg = "component %s does not exist in conf file" % s
-                LOG.info(msg)
-
-    def update_conf_with_modes(self, tecs):
-        for k, v in self.modes.items():
-            if tecs.has_option("general", k):
-                tecs.set("general", k, v)
-            else:
-                msg = "mode %s does not exist in conf file" % k
-                LOG.info(msg)
-
-    def update_tecs_conf(self, tecs):
-        self.update_conf_with_services(tecs)
-        self.update_conf_with_components(tecs)
-        self.update_conf_with_modes(tecs)
-
-    def update_ha_conf(self, ha, ha_nic_name, tecs=None):
-        if self.all_configs['OTHER'].get('dns_config'):
-            for heartbeat in self.heartbeats:
-                for name_ip in self.all_configs['OTHER']['dns_config']:
-                    for tmp in heartbeat:
-                        if tmp == name_ip.keys()[0]:
-                            heartbeat.remove(tmp)
-                            heartbeat.append(name_ip.values()[0])
-
-            for k, v in self.services_in_component.items():
-                for name_ip in self.all_configs['OTHER']['dns_config']:
-                    if v['fip'] == name_ip.keys()[0]:
-                        v['fip'] = name_ip.values()[0]
-        ha.set('DEFAULT', 'heartbeat_link1', ','.join(self.heartbeats[0]))
-        ha.set('DEFAULT', 'heartbeat_link2', ','.join(self.heartbeats[1]))
-        ha.set('DEFAULT', 'heartbeat_link3', ','.join(self.heartbeats[2]))
-
-        ha.set('DEFAULT', 'components', ','.join(
-            self.services_in_component.keys()))
-
-        for k, v in self.services_in_component.items():
-            ha.set('DEFAULT', k, ','.join(v['service']))
-            if k == 'glance':
-                if 'glance' in self.share_disk_services:
-                    ha.set('DEFAULT', 'glance_device_type', 'iscsi')
-                    ha.set(
-                        'DEFAULT', 'glance_device',
-                        '/dev/mapper/vg_glance-lv_glance')
-                    ha.set('DEFAULT', 'glance_fs_type', 'ext4')
-                else:
-                    ha.set('DEFAULT', 'glance_device_type', 'drbd')
-                    ha.set(
-                        'DEFAULT', 'glance_device', '/dev/vg_data/lv_glance')
-                    ha.set('DEFAULT', 'glance_fs_type', 'ext4')
-            # mariadb does not support db clusters yet, so it does not
-            # support a share disk.
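-            # database_device_type below is one of 'share' (a single
-            # shared LUN), 'share_cluster' (paired LUNs, one per HA node)
-            # or 'local_cluster' (local disks on every node)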
- if k == "database": - if 'db' in self.share_disk_services: - ha.set( - 'DEFAULT', 'database_device', - '/dev/mapper/vg_db-lv_db') - ha.set('DEFAULT', 'database_fs_type', 'ext4') - ha.set('DEFAULT', 'database_device_type', 'share') - if tecs: - tecs.set( - "general", - 'CONFIG_HA_INSTALL_MARIADB_LOCAL', - 'n') - elif 'db' in self.share_cluster_disk_services: - ha.set( - 'DEFAULT', 'database_device', - '/dev/mapper/vg_db-lv_db') - ha.set('DEFAULT', 'database_fs_type', 'ext4') - ha.set('DEFAULT', 'database_device_type', 'share_cluster') - if tecs: - tecs.set( - "general", - 'CONFIG_HA_INSTALL_MARIADB_LOCAL', - 'y') - else: - ha.set('DEFAULT', 'database_device_type', 'local_cluster') - if tecs: - tecs.set( - "general", - 'CONFIG_HA_INSTALL_MARIADB_LOCAL', - 'y') - - if 'db_backup' in self.share_disk_services: - ha.set( - 'DEFAULT', - 'backup_database_device', - '/dev/mapper/vg_db_backup-lv_db_backup') - ha.set('DEFAULT', 'backup_database_fs_type', 'ext4') - - if "mongod" in v['service']: - if 'mongodb' in self.share_disk_services: - ha.set( - 'DEFAULT', 'mongod_device', - '/dev/mapper/vg_mongodb-lv_mongodb') - ha.set('DEFAULT', 'mongod_fs_type', 'ext4') - ha.set('DEFAULT', 'mongod_local', '') - if tecs: - tecs.set( - "general", - 'CONFIG_HA_INSTALL_MONGODB_LOCAL', 'n') - else: - ha.set('DEFAULT', 'mongod_fs_type', 'ext4') - ha.set('DEFAULT', 'mongod_local', 'yes') - if tecs: - tecs.set( - "general", - 'CONFIG_HA_INSTALL_MONGODB_LOCAL', 'y') - - if k not in self.lb_components: - # if "bond" in v['nic_name']: - # v['nic_name'] = "vport" - ha.set('DEFAULT', k + '_fip', v['fip']) - if ha_nic_name and k not in ['horizon']: - nic_name = ha_nic_name - else: - nic_name = v['nic_name'] - ha.set('DEFAULT', k + '_nic', nic_name) - cidr_netmask = reduce(lambda x, y: x + y, - [bin(int(i)).count('1') - for i in v['netmask'].split('.')]) - ha.set('DEFAULT', k + '_netmask', cidr_netmask) - - -def update_conf(tecs, key, value): - tecs.set("general", key, value) - - -def get_conf(tecs_conf_file, **kwargs): - result = {} - if not kwargs: - return result - - tecs = ConfigParser() - tecs.optionxform = str - tecs.read(tecs_conf_file) - - result = {key: tecs.get("general", kwargs.get(key, None)) - for key in kwargs.keys() - if tecs.has_option("general", kwargs.get(key, None))} - return result - - -def _get_physnics_info(network_type, phynics): - # bond1(active-backup;lacp;eth1-eth2) - # eth0 - # phynet1:eth0 - # phynet1:bond1(active-backup;lacp;eth1-eth2), phynet2:eth3 - phynics_info = [] - if not phynics: - return - - phynic_info = phynics.split("(") - if 2 == len(phynic_info): - phynic_info = phynic_info[1][0:-1].split(";") - phynics_info.extend(phynic_info[-1].split('-')) - else: - phynic_info = phynic_info[0].split(":") - if network_type == 'vlan': - phynics_info.append(phynic_info[1]) - else: - phynics_info.append(phynic_info[0]) - return phynics_info - - -def get_physnics_info(network_type, phynics): - # bond1(active-backup;lacp;eth1-eth2) - # phynet1:eth0 - # phynet1:bond1(active-backup;lacp;eth1-eth2), phynet1:eth3 - phynics_info = [] - if network_type == 'vxlan': - phynics_info.extend(_get_physnics_info(network_type, phynics)) - elif network_type == 'vlan': - phynics = phynics.split(',') - for phynic_info in phynics: - phynics_info.extend(_get_physnics_info(network_type, phynic_info)) - return phynics_info - - -def update_conf_with_zenic(tecs, zenic_configs): - zenic_vip = zenic_configs.get('vip') - if not zenic_vip: - return - - auth = zenic_configs.get('auth') - if not auth: - auth = 
'restconf:LkfhRDGIPyGzbWGM2uAaNQ==' - - update_conf(tecs, 'CONFIG_ZENIC_USER_AND_PW', auth) - update_conf(tecs, 'CONFIG_ZENIC_API_NODE', '%s:8181' % zenic_vip) - - ml2_drivers = tecs.get( - "general", 'CONFIG_NEUTRON_ML2_MECHANISM_DRIVERS').split(',') - ml2_drivers.extend(['proxydriver']) - update_conf( - tecs, 'CONFIG_NEUTRON_ML2_MECHANISM_DRIVERS', ','.join(ml2_drivers)) - - -class DvsDaisyConfig(object): - - def __init__(self, tecs, networks_config): - self.tecs = tecs - self.networks_config = networks_config - - # common - self.dvs_network_type = [] - self.dvs_vswitch_type = {} - self.dvs_cpu_sets = [] - self.dvs_physnics = [] - self.enable_sdn = False - - # for vlan - self.dvs_physical_mappings = [] - self.dvs_bridge_mappings = [] - - # for vxlan - self.dvs_vtep_ip_ranges = [] - self.dvs_vxlan_info = '' - self.dvs_domain_id = {} - - def config_tecs_for_dvs(self): - self._get_dvs_config() - self._set_dvs_config() - - def _get_dvs_config(self): - network = self.networks_config - vswitch_type = network.get('vswitch_type') - if not vswitch_type: - return - self.dvs_vswitch_type.update(vswitch_type) - - dvs_cpu_sets = network.get('dvs_cpu_sets') - self.dvs_cpu_sets.extend(dvs_cpu_sets) - - network_type = network['network_config'].get('network_type') - - if network_type in ['vlan']: - self.dvs_network_type.extend(['vlan']) - self._private_network_conf_for_dvs(network) - - elif network_type in ['vxlan']: - self.dvs_network_type.extend(['vxlan']) - self._bearing_network_conf_for_dvs(network) - - def _set_dvs_config(self): - if not self.networks_config.get('enable_sdn') and ( - self.dvs_vswitch_type.get('ovs_agent_patch')) and ( - len(self.dvs_vswitch_type.get('ovs_agent_patch')) > 0): - return - - if not self.dvs_vswitch_type.get('ovs_agent_patch') and not\ - self.dvs_vswitch_type.get('ovdk'): - return - - update_conf(self.tecs, 'CONFIG_DVS_TYPE', self.dvs_vswitch_type) - update_conf(self.tecs, 'CONFIG_DVS_PHYSICAL_NICS', - ",".join(set(self.dvs_physnics))) - # cpu sets for dvs, add CONFIG_DVS_CPU_SETS to tecs.conf firstly - update_conf(self.tecs, 'CONFIG_DVS_CPU_SETS', self.dvs_cpu_sets) - - if 'vlan' in self.dvs_network_type: - update_conf(self.tecs, 'CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS', - self.dvs_bridge_mappings) - update_conf(self.tecs, 'CONFIG_NEUTRON_OVS_PHYSNET_IFACES', - self.dvs_physical_mappings) - - elif 'vxlan' in self.dvs_network_type: - update_conf(self.tecs, 'CONFIG_DVS_VXLAN_INFO', - self.dvs_vxlan_info) - update_conf(self.tecs, 'CONFIG_DVS_NODE_DOMAIN_ID', - self.dvs_domain_id) - update_conf(self.tecs, 'CONFIG_NEUTRON_ML2_VTEP_IP_RANGES', - self.dvs_vtep_ip_ranges) - - ''' - private_networks_config_for_dvs - { - network_config = { - enable_sdn = '' - network_type = ['vlan'] - } - - vswitch_type = { ===============> such as vxlan - 'ovdk': ['192.168.0.2', '192.168.0.20'] , - 'ovs_agent_patch': ['192.168.0.21', '192.168.0.30'] - } - - physnics_config = { - physical_mappings = eth0 ===============> such as ovs vlan - bridge_mappings = ==========> private->name & physical_name - } - } - ''' - - def _private_network_conf_for_dvs(self, private_network): - self.dvs_vswitch_type.update(private_network.get('vswitch_type')) - self.dvs_bridge_mappings = \ - private_network['physnics_config'].get('bridge_mappings') - self.dvs_physical_mappings = \ - private_network['physnics_config'].get('physical_mappings') - self.dvs_physical_mappings = self.dvs_physical_mappings.encode('utf8') - - self.dvs_physnics.extend( - get_physnics_info('vlan', self.dvs_physical_mappings)) - - ''' - 
bearing_networks_config - { - network_config = { - enable_sdn = '' - network_type = ['vxlan'] - vtep_ip_ranges=[['192.168.0.2','192.168.0.200']]==>bearing->ip_range - } - - vswitch_type = { ==========> bearing->assigned_network - 'ovdk': ['192.168.0.2', '192.168.0.20'] , - 'ovs_agent_patch': ['192.168.0.21', '192.168.0.30'] - } - - physnics_config = { - vxlan_info = eth0 ======>bearing->assigned_network->host_interface - domain_id = { ==========> bearing->assigned_network - '0': ['192.168.0.2', '192.168.0.20'] , - '1': ['192.168.0.21', '192.168.0.30'] - } - } - } - ''' - - def _bearing_network_conf_for_dvs(self, bearing_network): - self.dvs_vtep_ip_ranges.extend( - bearing_network['network_config'].get('vtep_ip_ranges')) - self.dvs_vswitch_type.update(bearing_network.get('vswitch_type')) - self.dvs_domain_id.update( - bearing_network['physnics_config'].get('dvs_domain_id')) - self.dvs_vxlan_info = \ - bearing_network['physnics_config'].get('vxlan_info') - self.dvs_physnics.extend( - get_physnics_info('vxlan', self.dvs_vxlan_info)) - - -default_tecs_conf_template_path = "/var/lib/daisy/tecs/" -tecs_conf_template_path = default_tecs_conf_template_path - - -def private_network_conf(tecs, private_networks_config): - if private_networks_config: - mode_str = { - '0': '(active-backup;off;"%s-%s")', - '1': '(balance-slb;off;"%s-%s")', - '2': '(balance-tcp;active;"%s-%s")' - } - - config_neutron_sriov_bridge_mappings = [] - config_neutron_sriov_physnet_ifaces = [] - config_neutron_ovs_bridge_mappings = [] - config_neutron_ovs_physnet_ifaces = [] - for private_network in private_networks_config: - type = private_network.get('type', None) - name = private_network.get('name', None) - assign_networks = private_network.get('assigned_networks', None) - slave1 = private_network.get('slave1', None) - slave2 = private_network.get('slave2', None) - mode = private_network.get('mode', None) - if not type or not name or not assign_networks or not\ - slave1 or not slave2 or not mode: - break - - for assign_network in assign_networks: - network_type = assign_network.get('network_type', None) - # TODO:why ml2_type & physnet_name is null - ml2_type = assign_network.get('ml2_type', None) - physnet_name = assign_network.get('physnet_name', None) - if not network_type or not ml2_type or not physnet_name: - break - - # ether - if 0 == cmp(type, 'ether') and\ - 0 == cmp(network_type, 'DATAPLANE'): - if 0 == cmp(ml2_type, 'sriov'): - config_neutron_sriov_bridge_mappings.append( - "%s:%s" % (physnet_name, "br-" + name)) - config_neutron_sriov_physnet_ifaces.append( - "%s:%s" % (physnet_name, name)) - elif 0 == cmp(ml2_type, 'ovs'): - config_neutron_ovs_bridge_mappings.append( - "%s:%s" % (physnet_name, "br-" + name)) - config_neutron_ovs_physnet_ifaces.append( - "%s:%s" % (physnet_name, name)) - # bond - elif 0 == cmp(type, 'bond') and\ - 0 == cmp(network_type, 'DATAPLANE'): - if 0 == cmp(ml2_type, 'sriov'): - config_neutron_sriov_bridge_mappings.append( - "%s:%s" % (physnet_name, "br-" + name)) - config_neutron_sriov_physnet_ifaces.append( - "%s:%s" % (physnet_name, name + mode_str[mode] - % (slave1, slave2))) - elif 0 == cmp(ml2_type, 'ovs'): - config_neutron_ovs_bridge_mappings.append( - "%s:%s" % (physnet_name, "br-" + name)) - config_neutron_ovs_physnet_ifaces.append( - "%s:%s" % (physnet_name, name + mode_str[mode] - % (slave1, slave2))) - - if config_neutron_sriov_bridge_mappings: - update_conf(tecs, - 'CONFIG_NEUTRON_SRIOV_BRIDGE_MAPPINGS', - ",".join(config_neutron_sriov_bridge_mappings)) - if 
config_neutron_sriov_physnet_ifaces: - update_conf(tecs, - 'CONFIG_NEUTRON_SRIOV_PHYSNET_IFACES', - ",".join(config_neutron_sriov_physnet_ifaces)) - if config_neutron_ovs_bridge_mappings: - update_conf(tecs, 'CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS', - ",".join(config_neutron_ovs_bridge_mappings)) - if config_neutron_ovs_physnet_ifaces: - update_conf(tecs, 'CONFIG_NEUTRON_OVS_PHYSNET_IFACES', - ",".join(config_neutron_ovs_physnet_ifaces)) - - -def update_tecs_config(config_data, cluster_conf_path): - msg = "tecs config data is: %s" % config_data - LOG.info(msg) - - daisy_tecs_path = tecs_conf_template_path - tecs_conf_template_file = os.path.join(daisy_tecs_path, "tecs.conf") - ha_conf_template_file = os.path.join(daisy_tecs_path, "HA.conf") - if not os.path.exists(cluster_conf_path): - os.makedirs(cluster_conf_path) - tecs_conf_out = os.path.join(cluster_conf_path, "tecs.conf") - ha_config_out = os.path.join(cluster_conf_path, "HA_1.conf") - - tecs = ConfigParser() - tecs.optionxform = str - tecs.read(tecs_conf_template_file) - - cluster_data = config_data['OTHER']['cluster_data'] - update_conf(tecs, 'CLUSTER_ID', cluster_data['id']) - # if cluster_data.has_key('networking_parameters'): - if 'networking_parameters' in cluster_data: - networking_parameters = cluster_data['networking_parameters'] - # if networking_parameters.has_key('base_mac') and\ - if 'base_mac'in networking_parameters and\ - networking_parameters['base_mac']: - update_conf( - tecs, 'CONFIG_NEUTRON_BASE_MAC', - networking_parameters['base_mac']) - # if networking_parameters.has_key('gre_id_range') and\ - if 'gre_id_range' in networking_parameters and\ - len(networking_parameters['gre_id_range']) > 1 \ - and networking_parameters['gre_id_range'][0] and\ - networking_parameters['gre_id_range'][1]: - update_conf(tecs, 'CONFIG_NEUTRON_ML2_TUNNEL_ID_RANGES', - ("%s:%s" % (networking_parameters['gre_id_range'][0], - networking_parameters['gre_id_range'][1]))) - if 'vxlan' in config_data['OTHER'].get('segmentation_type', {}): - update_conf( - tecs, 'CONFIG_NEUTRON_ML2_VNI_RANGES', - config_data['OTHER']['segmentation_type']['vxlan']['vni_range']) - update_conf(tecs, 'CONFIG_NEUTRON_ML2_TENANT_NETWORK_TYPES', 'vxlan') - update_conf(tecs, 'CONFIG_NEUTRON_ML2_TYPE_DRIVERS', 'vxlan') - else: - update_conf(tecs, 'CONFIG_NEUTRON_ML2_TENANT_NETWORK_TYPES', 'vlan') - update_conf(tecs, 'CONFIG_NEUTRON_ML2_TYPE_DRIVERS', 'vlan') - - physic_network_cfg = config_data['OTHER']['physic_network_config'] - if physic_network_cfg.get('json_path', None): - update_conf( - tecs, 'CONFIG_NEUTRON_ML2_JSON_PATH', - physic_network_cfg['json_path']) - if physic_network_cfg.get('vlan_ranges', None): - update_conf(tecs, 'CONFIG_NEUTRON_ML2_VLAN_RANGES', - physic_network_cfg['vlan_ranges']) - if config_data['OTHER']['tecs_installed_hosts']: - update_conf(tecs, 'EXCLUDE_SERVERS', ",".join( - config_data['OTHER']['tecs_installed_hosts'])) - - ha = ConfigParser() - ha.optionxform = str - ha.read(ha_conf_template_file) - - config = AnalsyConfig(config_data) - # if config_data['OTHER'].has_key('ha_nic_name'): - if 'ha_nic_name'in config_data['OTHER']: - ha_nic_name = config_data['OTHER']['ha_nic_name'] - else: - ha_nic_name = "" - - config.prepare() - - config.update_tecs_conf(tecs) - config.update_ha_conf(ha, ha_nic_name, tecs) - - update_conf_with_zenic(tecs, config_data['OTHER']['zenic_config']) - # if config_data['OTHER']['dvs_config'].has_key('network_config'): - if 'network_config' in config_data['OTHER']['dvs_config']: - 
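-        # a configured zenic vip doubles as the enable_sdn flag for the
-        # DVS configuration below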
config_data['OTHER']['dvs_config']['network_config']['enable_sdn'] = \ - config_data['OTHER']['zenic_config'].get('vip', False) - dvs_config = DvsDaisyConfig(tecs, config_data['OTHER']['dvs_config']) - - dvs_config.config_tecs_for_dvs() - - tecs.write(open(tecs_conf_out, "w+")) - ha.write(open(ha_config_out, "w+")) - - return - - -def test(): - print("Hello, world!") diff --git a/code/daisy/daisy/api/backends/tecs/disk_array.py b/code/daisy/daisy/api/backends/tecs/disk_array.py deleted file mode 100755 index 4ea8c064..00000000 --- a/code/daisy/daisy/api/backends/tecs/disk_array.py +++ /dev/null @@ -1,315 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -/install endpoint for tecs API -""" -import subprocess -from oslo_log import log as logging - -from daisy import i18n - -from daisy.common import exception -import daisy.api.backends.common as daisy_cmn -import daisy.api.backends.tecs.common as tecs_cmn - -try: - import simplejson as json -except ImportError: - import json - -LOG = logging.getLogger(__name__) -_ = i18n._ -_LE = i18n._LE -_LI = i18n._LI -_LW = i18n._LW - - -def _get_service_disk_for_disk_array(req, role_id): - disk_info = [] - service_disks = tecs_cmn.get_service_disk_list(req, - {'filters': { - 'role_id': role_id}}) - for service_disk in service_disks: - share_disk = {} - if service_disk['disk_location'] == 'share': - share_disk['service'] = service_disk['service'] - share_disk['protocol_type'] = service_disk['protocol_type'] - share_disk['lun'] = service_disk['lun'] - if service_disk['protocol_type'] == 'FIBER': - share_disk['fc_hba_wwpn'] = \ - service_disk['data_ips'].split(',') - else: - share_disk['data_ips'] = service_disk['data_ips'].split(',') - share_disk['lvm_config'] = {} - share_disk['lvm_config']['size'] = service_disk['size'] - share_disk['lvm_config']['vg_name'] =\ - 'vg_%s' % service_disk['service'] - share_disk['lvm_config']['lv_name'] =\ - 'lv_%s' % service_disk['service'] - share_disk['lvm_config']['fs_type'] = 'ext4' - disk_info.append(share_disk) - return disk_info - - -def _get_share_cluster_disk_for_disk_array(req, role_id): - ''' - disk_info = [{'service': 'db', 'lun': 'lun1', 'data_ips':'data_ip1'}, - {'service': 'db', 'lun': 'lun2', 'data_ips':'data_ip2'}, - {'service': 'glance', 'lun': 'lun3', 'data_ips':'data_ip3'}, - {'service': 'glance', 'lun': 'lun4', 'data_ips':'data_ip4'},] - ''' - disk_info = [] - service_disks = \ - tecs_cmn.get_service_disk_list(req, {'filters': {'role_id': role_id}}) - service_name = 'db' - for service_disk in service_disks: - share_cluster_disk = {} - if service_disk['disk_location'] == 'share_cluster': - share_cluster_disk['service'] = service_disk['service'] - share_cluster_disk['protocol_type'] = service_disk['protocol_type'] - share_cluster_disk['lun'] = service_disk['lun'] - if service_disk['protocol_type'] == 'FIBER': - share_cluster_disk['fc_hba_wwpn'] = \ - service_disk['data_ips'].split(',') - else: - share_cluster_disk['data_ips'] = \ - 
service_disk['data_ips'].split(',')
-            share_cluster_disk['lvm_config'] = {}
-            share_cluster_disk['lvm_config']['size'] = service_disk['size']
-            share_cluster_disk['lvm_config']['vg_name'] =\
-                'vg_%s' % service_disk['service']
-            share_cluster_disk['lvm_config']['lv_name'] =\
-                'lv_%s' % service_disk['service']
-            share_cluster_disk['lvm_config']['fs_type'] = 'ext4'
-            disk_info.append(share_cluster_disk)
-    return disk_info
-
-
-def _get_cinder_volume_for_disk_array(req, role_id):
-    cinder_volume_info = []
-    cinder_volumes = tecs_cmn.get_cinder_volume_list(req,
-                                                     {'filters': {
-                                                         'role_id': role_id}})
-    for cinder_volume in cinder_volumes:
-        cv_info = {}
-        cv_info['management_ips'] =\
-            cinder_volume['management_ips'].split(',')
-        cv_info['data_ips'] = cinder_volume['data_ips'].split(',')
-        cv_info['user_name'] = cinder_volume['user_name']
-        cv_info['user_pwd'] = cinder_volume['user_pwd']
-        index = cinder_volume['backend_index']
-        cv_info['backend'] = {index: {}}
-        cv_info['backend'][index]['volume_driver'] =\
-            cinder_volume['volume_driver']
-        cv_info['backend'][index]['volume_type'] =\
-            cinder_volume['volume_type']
-        cv_info['backend'][index]['pools'] =\
-            cinder_volume['pools'].split(',')
-        cinder_volume_info.append(cv_info)
-    return cinder_volume_info
-
-
-def get_disk_array_info(req, cluster_id):
-    share_disk_info = []
-    share_cluster_disk_info = []
-    volume_disk_info = {}
-    cinder_volume_disk_list = []
-    roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id)
-    for role in roles:
-        if role['deployment_backend'] != daisy_cmn.tecs_backend_name:
-            continue
-        if role['name'] == 'CONTROLLER_HA':
-            share_disks = _get_service_disk_for_disk_array(req, role['id'])
-            share_cluster_disks = \
-                _get_share_cluster_disk_for_disk_array(req, role['id'])
-            share_disk_info += share_disks
-            share_cluster_disk_info += share_cluster_disks
-            cinder_volumes =\
-                _get_cinder_volume_for_disk_array(req, role['id'])
-            cinder_volume_disk_list += cinder_volumes
-    if cinder_volume_disk_list:
-        volume_disk_info['disk_array'] = cinder_volume_disk_list
-    return (share_disk_info, volume_disk_info, share_cluster_disk_info)
-
-
-def config_ha_share_disk(share_disk_info,
-                         controller_ha_nodes,
-                         share_cluster_disk_info=None):
-    '''
-    share_disk_info = \
-        [{'service': 'db', 'lun': 'lun1', 'data_ips':'data_ip1'},
-         {'service': 'glance', 'lun': 'lun3', 'data_ips':'data_ip3'},]
-    share_cluster_disk_info = \
-        [{'service': 'db', 'lun': 'lun1', 'data_ips':'data_ip1', ...},
-         {'service': 'db', 'lun': 'lun2', 'data_ips':'data_ip2', ...},
-         {'service': 'glance', 'lun': 'lun3', 'data_ips':'data_ip3'},
-         {'service': 'glance', 'lun': 'lun4', 'data_ips':'data_ip4'},]
-    controller_ha_nodes[host_ip] = min_mac
-    '''
-    sorted_db_share_cluster = []
-    if share_cluster_disk_info:
-        db_share_cluster_disk = \
-            [disk for disk in share_cluster_disk_info
-             if disk['service'] == 'db']
-        if len(db_share_cluster_disk) != 2:
-            error_msg = 'share cluster disks %s must exist in pairs.'
% \ - db_share_cluster_disk - LOG.error(error_msg) - raise exception.InstallException(error_msg) - sorted_db_share_cluster = \ - sorted(db_share_cluster_disk, key=lambda s: s['lun']) - sorted_ha_nodes = \ - sorted(controller_ha_nodes.iteritems(), key=lambda d: d[1]) - sorted_ha_nodes_ip = [node[0] for node in sorted_ha_nodes] - - all_share_disk_info = [] - if sorted_db_share_cluster: - all_share_disk_info = \ - [[disk] + share_disk_info for disk in sorted_db_share_cluster] - # all_share_disk_info = \ - # [[{'lun': 'lun1', 'service': 'db', 'data_ips': 'data_ip1'}, - # {'lun': 'lun3', 'service': 'glance', 'data_ips': 'data_ip3'}], - # [{'lun': 'lun2', 'service': 'db', 'data_ips': 'data_ip2'}, - # {'lun': 'lun3', 'service': 'glance', 'data_ips': 'data_ip3'}]] - else: - for index in range(len(sorted_ha_nodes)): - all_share_disk_info.append(share_disk_info) - # all_share_disk_info = \ - # [{'lun': 'lun3', 'service': 'glance', 'data_ips': 'data_ip3'}, - # {'lun': 'lun3', 'service': 'glance', 'data_ips': 'data_ip3'}] - - ''' - cmd = 'rm -rf /var/lib/daisy/tecs/storage_auto_config/base/*.json' - daisy_cmn.subprocess_call(cmd) - with open("/var/lib/daisy/tecs/storage_auto_config/base/control.json",\ - "w") as fp: - json.dump(share_disk_info, fp, indent=2) - - for host_ip in controller_ha_nodes.keys(): - try: - scp_bin_result = subprocess.check_output( - 'scp -o StrictHostKeyChecking=no -r\ - /var/lib/daisy/tecs/storage_auto_config\ - %s:/home/tecs_install' % (host_ip,), - shell=True, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - error_msg = "scp /var/lib/daisy/tecs/storage_auto_config\ - to %s failed!" % host_ip - raise exception.InstallException(error_msg) - try: - LOG.info(_("Config share disk for host %s" % host_ip)) - cmd = "cd /home/tecs_install/storage_auto_config/;\ - python storage_auto_config.py share_disk %s"\ - % controller_ha_nodes[host_ip] - exc_result = subprocess.check_output( - 'clush -S -w %s "%s"' % (host_ip,cmd), - shell=True, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - LOG.info(_("Storage script error message: %s" % e.output)) - error_msg = "config Disk Array share disks\ - on %s failed!" % host_ip - raise exception.InstallException(error_msg) - ''' - - cmd = 'rm -rf /var/lib/daisy/tecs/storage_auto_config/base/*.json' - daisy_cmn.subprocess_call(cmd) - - for (host_ip, share_disk) in zip(sorted_ha_nodes_ip, all_share_disk_info): - with open("/var/lib/daisy/tecs/storage_auto_config/base/control.json", - "w") as fp: - json.dump(share_disk, fp, indent=2) - - try: - subprocess.check_output( - 'scp -o StrictHostKeyChecking=no -r\ - /var/lib/daisy/tecs/storage_auto_config\ - %s:/home/tecs_install' % (host_ip,), - shell=True, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - error_msg = "scp /var/lib/daisy/tecs/storage_auto_config\ - to %s failed!" % host_ip - raise exception.InstallException(error_msg) - - try: - LOG.info(_("Config share disk for host %s" % host_ip)) - cmd = "cd /home/tecs_install/storage_auto_config/;\ - python storage_auto_config.py share_disk %s"\ - % controller_ha_nodes[host_ip] - subprocess.check_output( - 'clush -S -w %s "%s"' % (host_ip, cmd), - shell=True, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - LOG.info(_("Storage script error message: %s" % e.output)) - error_msg = "config Disk Array share disks\ - on %s failed!" 
% host_ip - raise exception.InstallException(error_msg) - - -def config_ha_cinder_volume(volume_disk_info, controller_ha_ips): - cmd = 'rm -rf /var/lib/daisy/tecs/storage_auto_config/base/*.json' - daisy_cmn.subprocess_call(cmd) - with open("/var/lib/daisy/tecs/storage_auto_config/base/cinder.json", - "w") as fp: - json.dump(volume_disk_info, fp, indent=2) - for host_ip in controller_ha_ips: - try: - subprocess.check_output( - 'scp -o StrictHostKeyChecking=no -r\ - /var/lib/daisy/tecs/storage_auto_config\ - %s:/home/tecs_install' % (host_ip,), - shell=True, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - error_msg = "scp /var/lib/daisy/tecs/storage_auto_config\ - to %s failed!" % host_ip - raise exception.InstallException(error_msg) - try: - LOG.info(_("Config cinder volume for host %s" % host_ip)) - cmd = 'cd /home/tecs_install/storage_auto_config/;\ - python storage_auto_config.py cinder_conf %s' % host_ip - subprocess.check_output( - 'clush -S -w %s "%s"' % (host_ip, cmd), - shell=True, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - LOG.info(_("Storage script error message: %s" % e.output)) - error_msg = "config Disk Array cinder volumes\ - on %s failed!" % host_ip - raise exception.InstallException(error_msg) - - -def config_compute_multipath(hosts_ip): - for host_ip in hosts_ip: - try: - subprocess.check_output( - 'scp -o StrictHostKeyChecking=no -r\ - /var/lib/daisy/tecs/storage_auto_config\ - %s:/home/tecs_install' % (host_ip,), - shell=True, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - error_msg = "scp /var/lib/daisy/tecs/storage_auto_config\ - to %s failed!" % host_ip - raise exception.InstallException(error_msg) - try: - LOG.info(_("Config multipath for host %s" % host_ip)) - cmd = 'cd /home/tecs_install/storage_auto_config/;\ - python storage_auto_config.py check_multipath' - subprocess.check_output( - 'clush -S -w %s "%s"' % (host_ip, cmd), - shell=True, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - LOG.info(_("Storage script error message: %s" % e.output)) - error_msg = "config Disk Array multipath\ - on %s failed!" % host_ip - raise exception.InstallException(error_msg) diff --git a/code/daisy/daisy/api/backends/tecs/install.py b/code/daisy/daisy/api/backends/tecs/install.py deleted file mode 100755 index 9e32579c..00000000 --- a/code/daisy/daisy/api/backends/tecs/install.py +++ /dev/null @@ -1,1526 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-
-"""
-/install endpoint for tecs API
-"""
-import os
-import re
-import copy
-import subprocess
-import time
-
-from oslo_config import cfg
-from oslo_log import log as logging
-from webob.exc import HTTPBadRequest
-
-from threading import Thread
-
-from daisy import i18n
-import daisy.api.v1
-
-from daisy.common import utils
-from daisy.common import exception
-import daisy.registry.client.v1.api as registry
-from daisy.api.backends.tecs import config
-from daisy.api.network_api import network as neutron
-import daisy.api.backends.common as daisy_cmn
-import daisy.api.backends.tecs.common as tecs_cmn
-import daisy.api.backends.tecs.disk_array as disk_array
-from daisy.api.configset import manager
-
-try:
-    import simplejson as json
-except ImportError:
-    import json
-
-LOG = logging.getLogger(__name__)
-_ = i18n._
-_LE = i18n._LE
-_LI = i18n._LI
-_LW = i18n._LW
-SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS
-SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS
-ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE
-
-CONF = cfg.CONF
-install_opts = [
-    cfg.StrOpt('max_parallel_os_number', default=10,
-               help='Maximum number of hosts installing os at the same '
-                    'time.'),
-]
-CONF.register_opts(install_opts)
-
-CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format')
-CONF.import_opt('container_formats', 'daisy.common.config',
-                group='image_format')
-CONF.import_opt('image_property_quota', 'daisy.common.config')
-
-
-tecs_state = tecs_cmn.TECS_STATE
-daisy_tecs_path = tecs_cmn.daisy_tecs_path
-tecs_install_path = tecs_cmn.tecs_install_path
-
-
-def _invalid_bond_type(network_type, vswitch_type, bond_mode):
-    msg = "Invalid bond_mode(%s) for %s in %s network" % (
-        bond_mode, vswitch_type, network_type)
-    raise_exception = False
-    if bond_mode in ['0', '1', '2', '3', '4']:
-        return
-
-    if bond_mode and (2 == len(bond_mode.split(';'))):
-        bond_mode, lacp_mode = bond_mode.split(';')
-        if network_type in ['vxlan'] and vswitch_type in ['dvs', 'DVS']:
-            if bond_mode in ['active-backup', 'balance-slb']:
-                if lacp_mode not in ['off']:
-                    raise_exception = True
-            else:
-                raise_exception = True
-
-        elif network_type in ['vlan'] and vswitch_type in ['dvs', 'DVS',
-                                                           'ovs', 'OVS']:
-            if bond_mode in ['balance-tcp']:
-                if lacp_mode not in ['active', 'passive', 'off']:
-                    raise_exception = True
-            elif bond_mode in ['active-backup', 'balance-slb']:
-                if lacp_mode not in ['off']:
-                    raise_exception = True
-            else:
-                raise_exception = True
-    else:
-        raise_exception = True
-
-    if raise_exception:
-        raise exception.InstallException(msg)
-
-
-def _get_host_private_networks(host_detail, cluster_private_networks_name):
-    """
-    Use the pci segments of the member nics to replace the bond pci;
-    we use them to generate mappings.json.
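-    Illustration (pci values hypothetical): a bond port 'bond0' with
-    slave1='eth1' (pci 0000:81:00.0) and slave2='eth2' (pci 0000:81:00.1)
-    has its own 'pci' key popped and gains 'pci1'/'pci2' taken from the
-    member nics.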
- :param host_detail: host infos - :param cluster_private_networks_name: network info in cluster - :return: - """ - host_private_networks = [hi for pn in cluster_private_networks_name - for hi in host_detail['interfaces'] - for assigned_network in hi['assigned_networks'] - if assigned_network and - pn == assigned_network['name']] - - # If port type is bond,use pci segment of member port replace pci1 & pci2 - # segments of bond port - for interface_outer in host_private_networks: - if 0 != cmp(interface_outer.get('type', None), "bond"): - continue - slave1 = interface_outer.get('slave1', None) - slave2 = interface_outer.get('slave2', None) - if not slave1 or not slave2: - continue - interface_outer.pop('pci') - - for interface_inner in host_detail['interfaces']: - if 0 == cmp(interface_inner.get('name', None), slave1): - interface_outer['pci1'] = interface_inner['pci'] - elif 0 == cmp(interface_inner.get('name', None), slave2): - interface_outer['pci2'] = interface_inner['pci'] - return host_private_networks - - -def _write_private_network_cfg_to_json(req, cluster_id, private_networks): - """ - Generate cluster private network json. - We use the json file after tecs is installed. - :param private_networks: cluster private network params set. - :return: - """ - if not private_networks: - LOG.error("private networks can't be empty!") - return False - - cluster_hosts_network_cfg = {} - hosts_network_cfg = {} - for k in private_networks.keys(): - private_network_info = {} - for private_network in private_networks[k]: - # host_interface - type = private_network.get('type', None) - name = private_network.get('name', None) - assign_networks = private_network.get('assigned_networks', None) - slave1 = private_network.get('slave1', None) - slave2 = private_network.get('slave2', None) - pci = private_network.get('pci', None) - pci1 = private_network.get('pci1', None) - pci2 = private_network.get('pci2', None) - mode = private_network.get('mode', None) - if not type or not name or not assign_networks: - LOG.error("host_interface params invalid in private networks!") - continue - - for assign_network in assign_networks: - # network - # network_type = assign_network.get('network_type', None) - vswitch_type_network = daisy_cmn.get_assigned_network( - req, private_network['id'], assign_network['id']) - - vswitch_type = vswitch_type_network['vswitch_type'] - physnet_name = assign_network.get('name', None) - mtu = assign_network.get('mtu', None) - if not vswitch_type or not physnet_name: - LOG.error( - "private networks vswitch_type or\ - physnet name is invalid!") - continue - - physnet_name_conf = {} - physnet_name_conf['type'] = type - physnet_name_conf['name'] = name - physnet_name_conf['vswitch_type'] = vswitch_type - if mtu: - physnet_name_conf['mtu'] = mtu - # physnet_name_conf['ml2'] = ml2_type + "(direct)" - if 0 == cmp("bond", type): - if not pci1 or not pci2 or not\ - slave1 or not slave2 or not mode: - LOG.error( - "when type is 'bond',\ - input params is invalid in private networks!") - continue - physnet_name_conf['slave1'] = slave1 - physnet_name_conf['slave2'] = slave2 - physnet_name_conf['pci1'] = pci1 - physnet_name_conf['pci2'] = pci2 - physnet_name_conf['mode'] = mode - _invalid_bond_type('vlan', 'OVS', mode) - elif 0 == cmp("ether", type): - if not pci: - LOG.error( - "when type is 'ether',\ - input params is invalid in private networks!") - continue - physnet_name_conf['pci'] = pci - - if not physnet_name_conf: - continue - private_network_info[physnet_name] = physnet_name_conf - - if 
not private_network_info: - continue - hosts_network_cfg[k] = private_network_info - - if not hosts_network_cfg: - return False - cluster_hosts_network_cfg['hosts'] = hosts_network_cfg - mkdir_daisy_tecs_path = "mkdir -p " + daisy_tecs_path + cluster_id - daisy_cmn.subprocess_call(mkdir_daisy_tecs_path) - mapping_json = daisy_tecs_path + "/" + cluster_id + "/" + "mappings.json" - with open(mapping_json, "w+") as fp: - fp.write(json.dumps(cluster_hosts_network_cfg)) - return True - - -def _conf_private_network(req, cluster_id, host_private_networks_dict, - cluster_private_network_dict): - if not host_private_networks_dict: - LOG.info(_("No private network need config")) - return {} - - # different host(with ip) in host_private_networks_dict - config_neutron_ml2_vlan_ranges = [] - for k in host_private_networks_dict.keys(): - host_private_networks = host_private_networks_dict[k] - # different private network plane in host_interface - for host_private_network in host_private_networks: - assigned_networks = host_private_network.get( - 'assigned_networks', None) - if not assigned_networks: - break - private_network_info = \ - [network for assigned_network in assigned_networks - for network in cluster_private_network_dict - if assigned_network and assigned_network[ - 'name'] == network['name']] - - host_private_network['assigned_networks'] = private_network_info - config_neutron_ml2_vlan_ranges += \ - ["%(name)s:%(vlan_start)s:%(vlan_end)s" % - {'name': network['name'], 'vlan_start':network[ - 'vlan_start'], 'vlan_end':network['vlan_end']} - for network in private_network_info - if network['name'] and network['vlan_start'] and - network['vlan_end']] - - physic_network_cfg = {} - if _write_private_network_cfg_to_json( - req, cluster_id, host_private_networks_dict): - physic_network_cfg['json_path'] = daisy_tecs_path + \ - "/" + cluster_id + "/" + "mappings.json" - if config_neutron_ml2_vlan_ranges: - host_private_networks_vlan_range = ",".join( - list(set(config_neutron_ml2_vlan_ranges))) - physic_network_cfg['vlan_ranges'] = host_private_networks_vlan_range - return physic_network_cfg - - -def _enable_network(host_networks_dict): - for network in host_networks_dict: - if network != []: - return True - return False - - -def _get_dvs_network_type(vxlan, vlan): - if _enable_network(vxlan): - return 'vxlan', vxlan - elif _enable_network(vlan): - return 'vlan', vlan - else: - return None, None - - -def _get_vtep_ip_ranges(ip_ranges): - vtep_ip_ranges = [] - for ip_range in ip_ranges: - ip_range_start = ip_range.get('start') - ip_range_end = ip_range.get('end') - if ip_range_start and ip_range_end: - vtep_ip_ranges.append( - [ip_range_start.encode('utf8'), - ip_range_end.encode('utf8')]) - return vtep_ip_ranges - - -def _get_dvs_vxlan_info(interfaces, mode_str): - vxlan_nic_info = '' - for interface in interfaces: - if interface['type'] == 'ether': - vxlan_nic_info = interface['name'] - elif interface['type'] == 'bond': - _invalid_bond_type('vxlan', 'DVS', interface.get('mode')) - name = interface.get('name', 'bond1') - if interface.get('mode') in ['0', '1', '2', '3', '4']: - try: - bond_mode = mode_str[ - 'vxlan'].get(interface.get('mode')) - except: - bond_mode = mode_str['vxlan']['0'] - vxlan_nic_info = name + bond_mode % ( - interface['slave1'], interface['slave2']) - else: - vxlan_nic_info = "%s(%s;%s-%s)" % ( - name, interface.get('mode'), - interface['slave1'], interface['slave2']) - return vxlan_nic_info - - -def _get_dvs_domain_id(assign_network, dvs_domain_id, host_ip): - domain_id = 
assign_network.get('dvs_domain_id') - if not domain_id: - domain_id = '0' - - domain_ip = dvs_domain_id.get(domain_id, []) - domain_ip.append(host_ip) - domain_ip = {domain_id.encode('utf8'): domain_ip} - return domain_ip - - -def _get_bridge_mappings(interface): - try: - network = interface['assigned_networks'][0] - except: - return {} - - bridge_mappings = {} - if network.get('network_type') in ['DATAPLANE']: - nic = interface.get('name') - phynet_name = network.get('name') - if phynet_name and nic: - bridge_mappings.update({nic: phynet_name}) - return bridge_mappings - - -def _convert_bridge_mappings2list(bridge_mappings): - bridge_maps = [] - for nic, phynet in bridge_mappings.items(): - bridge_maps.append('%s:br_%s' % (phynet, nic)) - return set(bridge_maps) - - -def _convert_physical_mappings2list(physical_mappings): - physical_maps = [] - for phynet, nic_info in physical_mappings.items(): - physical_maps.append('%s:%s' % (phynet, nic_info)) - return set(physical_maps) - - -def _get_physical_mappings(interface, mode_str, bridge_mappings): - # bridge_mappings = {'eth0':'phynet1': 'bond0':'phynet2'} - vlan_nic_map_info = {} - phynic_name = interface.get('name') - physnet_name = bridge_mappings.get(phynic_name) - if interface['type'] == 'bond': - _invalid_bond_type('vlan', 'DVS', interface.get('mode')) - if interface.get('mode') in ['0', '1', '2', '3', '4']: - try: - bond_mode = mode_str['vlan'].get(interface.get('mode')) - except: - bond_mode = mode_str['vlan']['0'] - vlan_nic_map_info[physnet_name] = phynic_name + bond_mode % ( - interface['slave1'], interface['slave2']) - else: - # interface.get('mode') = active-backup;off - vlan_nic_map_info[physnet_name] = "%s(%s;%s-%s)" % ( - phynic_name, interface.get('mode'), - interface['slave1'], interface['slave2']) - else: - vlan_nic_map_info[physnet_name] = phynic_name - - return vlan_nic_map_info - - -def get_network_config_for_dvs(host_private_networks_dict, - cluster_private_network_dict): - # different private network plane in host_interface - host_private_networks_dict_for_dvs =\ - copy.deepcopy(host_private_networks_dict) - - for host_private_network in host_private_networks_dict_for_dvs: - private_networks = host_private_network.get( - 'assigned_networks', None) - if not private_networks: - break - private_network_info = \ - [network for private_network in private_networks - for network in cluster_private_network_dict - if private_network and private_network[ - 'name'] == network['name']] - host_private_network['assigned_networks'] = private_network_info - return host_private_networks_dict_for_dvs - - -def get_dvs_cpu_sets(req, cluster_id, role_hosts): - """ - dvs_cpu_list = [{'IP': 'ip1', 'DVS_CPU': [1,2,3,4]}, - {'IP': 'ip2', 'DVS_CPU': [2,3,4,5]}] - """ - dvs_cpu_list = [] - cluster_networks =\ - daisy_cmn.get_cluster_networks_detail(req, cluster_id) - for role_host in role_hosts: - if (role_host['status'] == tecs_state['ACTIVE'] or - role_host['status'] == tecs_state['UPDATING'] or - role_host['status'] == tecs_state['UPDATE_FAILED']): - continue - host_detail = daisy_cmn.get_host_detail(req, role_host['host_id']) - - dvs_interfaces = utils.get_dvs_interfaces(host_detail['interfaces']) - if dvs_interfaces and 'dvs_cpus' in host_detail: - management_ip = tecs_cmn.get_host_network_ip(req, - host_detail, - cluster_networks, - 'MANAGEMENT') - dvs_cpu_dict = {} - dvs_cpu_dict['IP'] = management_ip - dvs_cpu_dict['DVS_CPU'] =\ - utils.cpu_str_to_list(host_detail['dvs_cpus']) - dvs_cpu_list.append(dvs_cpu_dict) - return dvs_cpu_list - - 
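Before conf_dvs below: _get_dvs_vxlan_info and _get_physical_mappings above render each bond interface into a compact NIC string, translating numeric bond modes through the mode_str table that conf_dvs defines, and passing any other mode string through verbatim. A self-contained illustration with assumed interface data (not taken from a real cluster):

    # mode_str as defined in conf_dvs below (vlan part only).
    mode_str = {'vlan': {'0': '(active-backup;off;%s-%s)',
                         '1': '(balance-slb;off;%s-%s)',
                         '2': '(balance-tcp;active;%s-%s)'}}

    # A hypothetical bond interface as daisy represents it.
    bond = {'type': 'bond', 'name': 'bond0', 'mode': '2',
            'slave1': 'eth0', 'slave2': 'eth1'}

    # Numeric mode: look up the template and fill in the slave NICs.
    nic_info = bond['name'] + mode_str['vlan'][bond['mode']] % (
        bond['slave1'], bond['slave2'])
    assert nic_info == 'bond0(balance-tcp;active;eth0-eth1)'

    # A non-numeric mode such as 'active-backup;off' is rendered verbatim
    # instead: "bond0(active-backup;off;eth0-eth1)".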
-def conf_dvs(req, host_vxlan_networks_dict, host_private_networks_dict): - mode_str = { - 'vxlan': - { - '0': '(active-backup;off;%s-%s)', - '1': '(balance-slb;off;%s-%s)', - }, - 'vlan': { - '0': '(active-backup;off;%s-%s)', - '1': '(balance-slb;off;%s-%s)', - '2': '(balance-tcp;active;%s-%s)' - } - } - - network_type, networks_dict = _get_dvs_network_type( - host_vxlan_networks_dict, host_private_networks_dict) - - if not network_type: - return {} - - dvs_config = {} - - network_config = {} - vswitch_type = {} - physnics_config = {} - installed_dvs = [] - installed_ovs = [] - network_config['network_type'] = network_type - - # for vxlan - network_config['vtep_ip_ranges'] = [] - dvs_domain_id = {} - - # for vlan - bridge_mappings = {} - physical_mappings = {} - - for host_ip, interfaces in networks_dict.items(): - host_ip = host_ip.encode('utf8') - assign_network = daisy_cmn.get_assigned_network( - req, interfaces[0]['id'], - interfaces[0]['assigned_networks'][0].get('id')) - - if assign_network['vswitch_type'] in ['dvs', 'DVS']: - installed_dvs.append(host_ip) - elif assign_network['vswitch_type'] in ['ovs', 'OVS']: - installed_ovs.append(host_ip) - - if network_type == 'vxlan': - network_config['vtep_ip_ranges'].extend( - _get_vtep_ip_ranges( - interfaces[0]['assigned_networks'][0].get('ip_ranges'))) - - dvs_domain_id.update( - _get_dvs_domain_id(assign_network, dvs_domain_id, host_ip)) - - if not physnics_config.get('vxlan_info'): - physnics_config['vxlan_info'] = _get_dvs_vxlan_info( - interfaces, mode_str) - - if network_type == 'vlan': - for interface in interfaces: - bridge_mapping = _get_bridge_mappings(interface) - physical_mapping = _get_physical_mappings( - interface, mode_str, bridge_mapping) - bridge_mappings.update(bridge_mapping) - physical_mappings.update(physical_mapping) - - vswitch_type['ovdk'] = installed_dvs - vswitch_type['ovs_agent_patch'] = installed_ovs - physnics_config['dvs_domain_id'] = dvs_domain_id - physnics_config['physical_mappings'] = ",".join( - _convert_physical_mappings2list(physical_mappings)) - physnics_config['bridge_mappings'] = ",".join( - _convert_bridge_mappings2list(bridge_mappings)) - - dvs_config['vswitch_type'] = vswitch_type - dvs_config['network_config'] = network_config - dvs_config['physnics_config'] = physnics_config - - return dvs_config - - -def _get_interfaces_network(req, host_detail, cluster_networks): - has_interfaces = {} - host_mngt_network = tecs_cmn.get_host_interface_by_network( - host_detail, 'MANAGEMENT') - host_mgnt_ip = tecs_cmn.get_host_network_ip( - req, host_detail, cluster_networks, 'MANAGEMENT') - host_mgnt_netmask = tecs_cmn.get_network_netmask( - cluster_networks, 'MANAGEMENT') - host_mngt_network['ip'] = host_mgnt_ip - host_mngt_network['netmask'] = host_mgnt_netmask - has_interfaces['management'] = host_mngt_network - - # host_deploy_network = tecs_cmn.get_host_interface_by_network( - # host_detail, 'DEPLOYMENT') - host_deploy_network_info = tecs_cmn.get_host_interface_by_network( - host_detail, 'DEPLOYMENT') - # note:"is_deployment" can't label delpoyment network, it only used to - # label dhcp mac - if host_deploy_network_info: - host_deploy_ip = tecs_cmn.get_host_network_ip( - req, host_detail, cluster_networks, 'DEPLOYMENT') - host_deploy_netmask = tecs_cmn.get_network_netmask( - cluster_networks, 'DEPLOYMENT') - host_deploy_network_info['ip'] = host_deploy_ip - host_deploy_network_info['netmask'] = host_deploy_netmask - has_interfaces['deployment'] = host_deploy_network_info - - mngt_network_cidr = 
tecs_cmn.get_network_cidr( - cluster_networks, 'MANAGEMENT') - host_storage_networks = [network for network in cluster_networks - if network['network_type'] == 'STORAGE' and - network['cidr'] != mngt_network_cidr] - if host_storage_networks: - host_storage_network_info = tecs_cmn.get_host_interface_by_network( - host_detail, host_storage_networks[0]['name']) - if host_storage_network_info: - host_storage_ip = tecs_cmn.get_host_network_ip( - req, host_detail, cluster_networks, - host_storage_networks[0]['name']) - host_storage_netmask = tecs_cmn.get_network_netmask( - cluster_networks, host_storage_networks[0]['name']) - host_storage_network_info['ip'] = host_storage_ip - host_storage_network_info['netmask'] = host_storage_netmask - has_interfaces['storage'] = host_storage_network_info - - host_public_network_info = tecs_cmn.get_host_interface_by_network( - host_detail, 'PUBLICAPI') - if host_public_network_info: - public_vlan_id = tecs_cmn.get_network_vlan_id( - cluster_networks, 'PUBLICAPI') - if public_vlan_id: - public_nic_name = host_public_network_info[ - 'name'] + '.' + public_vlan_id - else: - public_nic_name = host_public_network_info['name'] - - host_public_ip = tecs_cmn.get_host_network_ip( - req, host_detail, cluster_networks, 'PUBLICAPI') - host_public_netmask = tecs_cmn.get_network_netmask( - cluster_networks, 'PUBLICAPI') - host_public_network_info['ip'] = host_public_ip - host_public_network_info['name'] = public_nic_name - host_public_network_info['netmask'] = host_public_netmask - has_interfaces['publicapi'] = host_public_network_info - - cluster_heartbeat_network_dict = [ - network for network in cluster_networks if network[ - 'network_type'] == 'HEARTBEAT'] - cluster_heartbeat_networks_name = [ - network['name'] for network in cluster_heartbeat_network_dict] - for heartbeat_network_name in cluster_heartbeat_networks_name: - host_heartbeat_network_info = tecs_cmn.get_host_interface_by_network( - host_detail, heartbeat_network_name) - if not host_heartbeat_network_info: - cluster_heartbeat_networks_name.remove(heartbeat_network_name) - if len(cluster_heartbeat_networks_name) >= 1: - host_heartbeat1_network_info = tecs_cmn.get_host_interface_by_network( - host_detail, cluster_heartbeat_networks_name[0]) - host_heartbeat1_network_info['ip'] = tecs_cmn.get_host_network_ip( - req, host_detail, cluster_networks, - cluster_heartbeat_networks_name[0]) - has_interfaces['heartbeat1'] = host_heartbeat1_network_info - if len(cluster_heartbeat_networks_name) == 2: - host_heartbeat2_network_info = tecs_cmn.get_host_interface_by_network( - host_detail, cluster_heartbeat_networks_name[1]) - host_heartbeat2_network_info['ip'] = tecs_cmn.get_host_network_ip( - req, host_detail, cluster_networks, - cluster_heartbeat_networks_name[1]) - has_interfaces['heartbeat2'] = host_heartbeat2_network_info - return has_interfaces - - -def _get_host_nic_name(cluster_network, host_detail): - """ - Different networking will generate different ha port name, - the rule of generation - is describe in comment. - :param cluster_network: Network info in cluster. 
- :param host_detail: - :return: - """ - copy_host_detail = copy.deepcopy(host_detail) - - mgr_interface_info = tecs_cmn.get_host_interface_by_network( - copy_host_detail, 'MANAGEMENT') - nic_info = [network - for network in cluster_network - for netname in mgr_interface_info.get( - 'assigned_networks', None) - if network.get('name', None) == netname] - - nic_capability = [info['capability'] - for info in nic_info if info[ - 'network_type'] != "DATAPLANE"] - if not nic_capability or nic_capability == [None]: - return mgr_interface_info['name'] - - mgr_nic_info = [mgr_net for mgr_net in nic_info if mgr_net[ - 'network_type'] == "MANAGEMENT"][0] - # if private and management plane is unifier - if set(["DATAPLANE", "MANAGEMENT"]).issubset(set([info[ - 'network_type'] for info in nic_info])): - # if type = 'ether' and 'ovs' not in ml2 and management is 'high' - if "ether" == mgr_interface_info.get('type', None) and \ - "ovs" not in [mgr_interface_info.get('vswitch_type', None)] and \ - "high" == mgr_nic_info['capability']: - return mgr_interface_info['name'] - - # if ip at outer - if mgr_interface_info.get('ip', None) and mgr_interface_info.get( - 'name', None): - return "v_" + mgr_interface_info['name'] - # ip at inner - elif mgr_nic_info.get('ip', None): - return "managent" - - if "low" not in nic_capability: - return mgr_interface_info['name'] - - # if ip at outer - if mgr_interface_info.get('ip', None) and\ - mgr_interface_info.get('name', None): - return "v_" + mgr_interface_info['name'] - - # ip at inner - elif mgr_nic_info.get('ip', None): - return "managent" - - -def get_share_disk_services(req, role_id): - service_disks = tecs_cmn.get_service_disk_list(req, {'role_id': role_id}) - share_disk_services = [] - - for service_disk in service_disks: - if service_disk['disk_location'] == 'share': - share_disk_services.append(service_disk['service']) - return share_disk_services - - -def _get_vxlan_vni_range(network): - if network.get('vni_start') and network.get('vni_end'): - return '%s:%s' % (network['vni_start'], network['vni_end']) - else: - return '1000:3000' - - -def get_share_cluster_disk_services(req, role_id): - service_disks = tecs_cmn.get_service_disk_list(req, {'role_id': role_id}) - share_cluster_disk_services = [] - - for service_disk in service_disks: - if service_disk['disk_location'] == 'share_cluster': - share_cluster_disk_services.append(service_disk['service']) - return share_cluster_disk_services - - -def get_cluster_tecs_config(req, cluster_id): - LOG.info(_("Get tecs config from database...")) - params = dict(limit=1000000) - roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id) - cluster_networks = daisy_cmn.get_cluster_networks_detail(req, cluster_id) - try: - all_services = registry.get_services_detail(req.context, **params) - all_components = registry.get_components_detail(req.context, **params) - cluster_data = registry.get_cluster_metadata(req.context, cluster_id) - except exception.Invalid as e: - raise HTTPBadRequest(explanation=e.msg, request=req) - - segment_type = {} - dataplane_network_dict = [network for network in cluster_networks if - network['network_type'] == 'DATAPLANE'] - if dataplane_network_dict[0]['segmentation_type'] in ['vlan']: - cluster_private_network_dict = dataplane_network_dict - cluster_vxlan_network_dict = [] - segment_type.update({'vlan': ''}) - elif dataplane_network_dict[0]['segmentation_type'] in ['vxlan']: - cluster_private_network_dict = [] - cluster_vxlan_network_dict = dataplane_network_dict - segment_type.update({'vxlan': 
{'vni_range': _get_vxlan_vni_range( - dataplane_network_dict[0])}}) - else: - cluster_private_network_dict = [] - cluster_vxlan_network_dict = [] - - cluster_private_networks_name = [network['name'] for network in - cluster_private_network_dict] - cluster_vxlan_networks_name = [network['name'] for network in - cluster_vxlan_network_dict] - - tecs_config = {} - tecs_config.update({'OTHER': {}}) - other_config = tecs_config['OTHER'] - other_config.update({'cluster_data': cluster_data}) - tecs_installed_hosts = set() - host_private_networks_dict = {} - host_vxlan_network_dict = {} - host_private_networks_dict_for_dvs = {} - zenic_cfg = {} - dvs_cpu_sets = [] - for role in roles: - if role['name'] == 'ZENIC_NFM': - zenic_cfg['vip'] = role['vip'] - if role['deployment_backend'] != daisy_cmn.tecs_backend_name: - continue - try: - role_service_ids = registry.get_role_services( - req.context, role['id']) - except exception.Invalid as e: - raise HTTPBadRequest(explanation=e.msg, request=req) - - role_services_detail = [asc for rsci in role_service_ids - for asc in all_services if asc[ - 'id'] == rsci['service_id']] - component_id_to_name = dict( - [(ac['id'], ac['name']) for ac in all_components]) - service_components = dict( - [(scd['name'], component_id_to_name[scd['component_id']]) - for scd in role_services_detail]) - - role_hosts = daisy_cmn.get_hosts_of_role(req, role['id']) - ha_nic_name = '' - host_interfaces = [] - for role_host in role_hosts: - host_detail = daisy_cmn.get_host_detail(req, role_host['host_id']) - - sorted_host_detail = tecs_cmn.sort_interfaces_by_pci( - cluster_networks, host_detail) - host_private_networks_list =\ - _get_host_private_networks(sorted_host_detail, - cluster_private_networks_name) - # get ha nic port name - if role['name'] == "CONTROLLER_HA": - mgr_nic_name = _get_host_nic_name( - cluster_networks, sorted_host_detail) - mgr_vlan_id = tecs_cmn.get_mngt_network_vlan_id( - cluster_networks) - if mgr_vlan_id: - mgr_nic_name = mgr_nic_name + '.' 
+ mgr_vlan_id - if ha_nic_name and mgr_nic_name != ha_nic_name: - msg = "management plane nic name is\ - different on hosts with HA role" - LOG.error(msg) - raise HTTPBadRequest(explanation=msg, request=req) - else: - ha_nic_name = mgr_nic_name - # if not other_config.has_key('ha_nic_name'): - if 'ha_nic_name' not in other_config: - other_config.update({'ha_nic_name': mgr_nic_name}) - - has_interfaces = _get_interfaces_network( - req, host_detail, cluster_networks) - has_interfaces.update({'name': host_detail['name']}) - host_interfaces.append(has_interfaces) - # mangement network must be configed - host_mgnt_ip = has_interfaces['management']['ip'] - - host_mgnt = host_detail['name'] if cluster_data[ - 'use_dns'] else host_mgnt_ip - - # host_mgnt_ip used to label who the private networks is - host_private_networks_dict[host_mgnt] = host_private_networks_list - if role['name'] == 'COMPUTER': - host_vxlan_network_list = _get_host_private_networks( - sorted_host_detail, cluster_vxlan_networks_name) - if host_vxlan_network_list: - host_private_networks_dict_for_dvs = {} - host_vxlan_network_dict[host_mgnt] = \ - get_network_config_for_dvs( - host_vxlan_network_list, cluster_vxlan_network_dict) - elif host_private_networks_list: - host_vxlan_network_dict = {} - host_private_networks_dict_for_dvs[host_mgnt] = \ - get_network_config_for_dvs( - host_private_networks_list, - cluster_private_network_dict) - - # get host ip of tecs is active - if (role_host['status'] == tecs_state['ACTIVE'] or - role_host['status'] == tecs_state['UPDATING'] or - role_host['status'] == tecs_state['UPDATE_FAILED']): - tecs_installed_hosts.add(host_mgnt) - - share_disk_services = get_share_disk_services(req, role['id']) - share_cluster_disk_services = \ - get_share_cluster_disk_services(req, role['id']) - - is_ha = re.match(".*_HA$", role['name']) is not None - if host_interfaces: - # if role['public_vip'] and not - # host_interfaces[0].has_key('public'): - if role['public_vip'] and 'publicapi' not in host_interfaces[0]: - msg = "no public networkplane found while role has public vip" - LOG.error(msg) - raise exception.NotFound(message=msg) - - tecs_config.update({role['name']: - {'services': service_components, - 'vip': role['vip'], - 'host_interfaces': host_interfaces, - 'share_disk_services': share_disk_services, - 'share_cluster_disk_services': - share_cluster_disk_services - }}) - if is_ha: - tecs_config[role['name']]['ntp_server'] = role['ntp_server'] - tecs_config[role['name']]['public_vip'] = role['public_vip'] - tecs_config[role['name']]['glance_vip'] = role['glance_vip'] - tecs_config[role['name']]['db_vip'] = role['db_vip'] - if role['name'] == 'COMPUTER': - dvs_cpu_set = get_dvs_cpu_sets(req, cluster_id, role_hosts) - if dvs_cpu_set: - dvs_cpu_sets.extend(dvs_cpu_set) - other_config.update({'tecs_installed_hosts': tecs_installed_hosts}) - # replace private network - physic_network_cfg = _conf_private_network( - req, cluster_id, host_private_networks_dict, - cluster_private_network_dict) - dvs_cfg = conf_dvs( - req, host_vxlan_network_dict, host_private_networks_dict_for_dvs) - # set for dvs_cpu_sets - dvs_cfg['dvs_cpu_sets'] = dvs_cpu_sets - - other_config.update({'physic_network_config': physic_network_cfg}) - other_config.update({'dvs_config': dvs_cfg}) - other_config.update({'segmentation_type': segment_type}) - other_config.update({'zenic_config': zenic_cfg}) - return tecs_config - - -def get_host_name_and_mgnt_ip(tecs_config): - name_ip_list = [] - ip_list = [] - ha_nodes_ip = set() - nodes_ips = {'ha': 
set(), 'lb': set(), 'computer': set()} - - for role_name, role_configs in tecs_config.items(): - if role_name == "OTHER": - continue - for host in role_configs['host_interfaces']: - ip_domain_dict = {} - host_mgt = host['management'] - if host_mgt['ip'] not in ip_list: - ip_list.append(host_mgt['ip']) - ip_domain_dict.update({host['name']: host_mgt['ip']}) - name_ip_list.append(ip_domain_dict) - if role_name == 'CONTROLLER_HA': - ha_nodes_ip.add(host_mgt['ip']) - if host_mgt['ip'] in tecs_config['OTHER']['tecs_installed_hosts'] \ - or host['name'] \ - in tecs_config['OTHER']['tecs_installed_hosts']: - continue - if role_name == 'CONTROLLER_HA': - nodes_ips['ha'].add(host_mgt['ip']) - if role_name == 'CONTROLLER_LB': - nodes_ips['lb'].add(host_mgt['ip']) - if role_name == 'COMPUTER': - nodes_ips['computer'].add(host_mgt['ip']) - return name_ip_list, nodes_ips, ha_nodes_ip - - -def replace_ip_with_domain_name(req, tecs_config): - domain_ip_list = [] - ip_list = [] - lb_float_ip = tecs_config['CONTROLLER_LB']['vip'] - for role_name, role_configs in tecs_config.items(): - if role_name == "OTHER": - continue - is_ha = re.match(".*_HA$", role_name) is not None - is_lb = re.match(".*_LB$", role_name) is not None - - for host in role_configs['host_interfaces']: - ip_domain_dict = {} - host_mgt = host['management'] - if host_mgt['ip'] not in ip_list: - ip_list.append(host_mgt['ip']) - ip_domain_dict.update({host['name']: host_mgt['ip']}) - domain_ip_list.append(ip_domain_dict) - host_mgt['ip'] = host['name'] - - if is_ha and role_configs.get('vip'): - domain_ip_list.append({'ha-vip': role_configs['vip']}) - if role_configs['ntp_server'] == role_configs['vip']: - role_configs['ntp_server'] = 'ha-vip' - elif role_configs['ntp_server'] == lb_float_ip: - role_configs['ntp_server'] = 'lb-vip' - role_configs['vip'] = 'ha-vip' - - if role_configs.get('public_vip'): - domain_ip_list.append({'public-vip': role_configs['public_vip']}) - if role_configs.get('glance_vip'): - domain_ip_list.append({'glance-vip': role_configs['glance_vip']}) - role_configs['glance_vip'] = 'glance-vip' - if role_configs.get('db_vip'): - domain_ip_list.append({'db-vip': role_configs['db_vip']}) - role_configs['db_vip'] = 'db-vip' - - if is_lb and role_configs.get('vip'): - domain_ip_list.append({'lb-vip': role_configs['vip']}) - role_configs['vip'] = 'lb-vip' - return domain_ip_list - - -def config_dnsmasq_server(host_ip_list, domain_ip_list, password='ossdbg1'): - dns_conf = "/etc/dnsmasq.conf" - for host_ip in host_ip_list: - try: - subprocess.check_output( - "sshpass -p %s ssh -o StrictHostKeyChecking=no %s " - "test -f %s" % (password, host_ip, dns_conf), - shell=True, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError: - msg = '%s does not exist in %s' % (dns_conf, host_ip) - LOG.error(msg) - raise exception.NotFound(message=msg) - - config_scripts = [ - "sed -i '/^[^#]/s/no-resolv[[:space:]]*/\#no-resolv/'\ - %s" % dns_conf, - "sed -i '/^[^#]/s/no-poll[[:space:]]*/\#no-poll/' %s" % dns_conf, - "cache_size_linenumber=`grep -n 'cache-size=' %s| awk -F ':' " - "'{print $1}'` && [ ! 
-z $cache_size_linenumber ] && sed -i " - "${cache_size_linenumber}d %s" % (dns_conf, dns_conf), - "echo 'cache-size=3000' >> %s" % dns_conf] - - tecs_cmn.run_scrip(config_scripts, host_ip, password, - msg='Failed to config cache of dns server on %s' % - host_ip) - - config_ip_scripts = [] - for domain_name_ip in domain_ip_list: - domain_name = domain_name_ip.keys()[0] - domain_ip = domain_name_ip.values()[0] - config_ip_scripts.append( - "controller1_linenumber=`grep -n 'address=/%s' %s| awk -F ':' " - "'{print $1}'` && [ ! -z ${controller1_linenumber} ] && " - "sed -i ${controller1_linenumber}d %s" % - (domain_name, dns_conf, dns_conf)) - config_ip_scripts.append("echo 'address=/%s/%s' >> %s" % - (domain_name, domain_ip, dns_conf)) - tecs_cmn.run_scrip(config_ip_scripts, host_ip, password, - 'Failed to config domain-ip of dns server on %s' % - host_ip) - - service_start_scripts = [ - "dns_linenumber=`grep -n \"^[\ - [:space:]]*ExecStart=/usr/sbin/dnsmasq -k\" " - "/usr/lib/systemd/system/dnsmasq.service|cut -d \":\" -f 1` && " - "sed -i \"${dns_linenumber}c ExecStart=/usr/sbin/dnsmasq -k " - "--dns-forward-max=150\" /usr/lib/systemd/system/dnsmasq.service", - "for i in `ps -elf | grep dnsmasq |grep -v grep | awk -F ' ' '{\ - print $4}'`;do kill -9 $i;done ", - "systemctl daemon-reload && systemctl enable dnsmasq.service && " - "systemctl restart dnsmasq.service"] - tecs_cmn.run_scrip(service_start_scripts, host_ip, password, - msg='Failed to start service of dns server on %s' % - host_ip) - - -def config_dnsmasq_client(host_ip_list, ha_ip_list, password='ossdbg1'): - dns_client_file = "/etc/resolv.conf" - tmp_dns_client_file = "/etc/resolv.conf.tmp" - config_scripts = [] - for ha_ip in ha_ip_list: - config_scripts.append("echo 'nameserver %s' >> %s" % - (ha_ip, tmp_dns_client_file)) - config_scripts.append('cat %s > %s' % (tmp_dns_client_file, - dns_client_file)) - config_scripts.append('rm -rf %s' % tmp_dns_client_file) - for host_ip in host_ip_list: - if host_ip not in ha_ip_list: - tecs_cmn.run_scrip(config_scripts, host_ip, password, - msg='Failed to config dns client on %s' % - host_ip) - - tecs_cmn.run_scrip(config_scripts, - msg='Failed to config dns client on daisy host') - - -def config_nodes_hosts(host_ip_list, domain_ip, password='ossdbg1'): - hosts_file = "/etc/hosts" - tmp_hosts_file = "/etc/hosts.tmp" - config_scripts = ['cat /etc/hosts > %s' % tmp_hosts_file] - for name_ip in domain_ip: - config_scripts.append("linenumber=`grep -n '%s$' %s | " - "awk -F ':' '{print $1}'` && " - "[ ! 
-z $linenumber ] && " - "sed -i ${linenumber}d %s" % - (name_ip.keys()[0], - tmp_hosts_file, tmp_hosts_file)) - config_scripts.append("echo '%s %s' >> %s" % (name_ip.values()[0], - name_ip.keys()[0], - tmp_hosts_file)) - config_scripts.append('cat %s > %s' % (tmp_hosts_file, hosts_file)) - config_scripts.append('rm -rf %s' % tmp_hosts_file) - for host_ip in host_ip_list: - tecs_cmn.run_scrip(config_scripts, host_ip, password, - msg='Failed to config /etc/hosts on %s' % host_ip) - tecs_cmn.run_scrip(config_scripts, - msg='Failed to config /etc/hosts on daisy host') - - -def revise_nova_config(computer_nodes, ha_vip, public_vip, compute_ip_domain, - password='ossdbg1'): - nova_file = "/etc/nova/nova.conf" - for host_ip in computer_nodes: - scripts = [] - if public_vip: - scripts.extend(["linenumber=`grep -n '^novncproxy_base_url' %s | " - "awk -F ':' '{print $1}'`" % nova_file, - 'sed -i "${linenumber}s/public-vip/%s/" %s' % - (public_vip, nova_file)]) - else: - scripts.extend(["linenumber=`grep -n '^novncproxy_base_url' %s | " - "awk -F ':' '{print $1}'`" % nova_file, - 'sed -i "${linenumber}s/ha-vip/%s/" %s' % - (ha_vip, nova_file)]) - scripts.extend(["linenumber=`grep -n '^vncserver_proxyclient_address' " - "%s | awk -F ':' '{print $1}'`" % nova_file, - 'sed -i "${linenumber}s/127.0.0.1/%s/" %s' % - (compute_ip_domain[host_ip], nova_file), - "systemctl restart openstack-nova-compute.service "]) - tecs_cmn.run_scrip(scripts, host_ip, password, - msg='Failed to config nova on %s' % host_ip) - - -def revise_horizon_config(ha_nodes, ha_vip, public_vip, password='ossdbg1'): - dashboard_file = "/etc/httpd/conf.d/15-horizon_vhost.conf" - for host_ip in ha_nodes: - config_scripts = ["linenumber1=`grep -n 'ServerAlias %s' " - "%s| awk -F ':' '{print $1}'` && " - "[ ! -z ${linenumber1} ] && sed -i " - "${linenumber1}d %s" % (host_ip, - dashboard_file, - dashboard_file), - "linenumber2=`grep -n 'ServerAlias \ - %s' %s| awk -F ':' '" - "{print $1}'` && [ ! -z ${linenumber2} ] && sed -i " - "${linenumber2}d %s" % (ha_vip, dashboard_file, - dashboard_file), - "linenumber3=`grep -n 'ServerAlias \ - %s' %s| awk -F ':' '" - "{print $1}'` && [ ! -z ${linenumber3} ] && sed -i " - "${linenumber3}d %s" % (public_vip, dashboard_file, - dashboard_file), - 'dasboard_linenumber1=`grep \ - -n "ServerAlias localhost" ' - '%s|cut -d ":" -f 1` && sed -i \ - "${dasboard_linenumber1}a ' - 'ServerAlias %s" %s' % (dashboard_file, host_ip, - dashboard_file), - 'dasboard_linenumber1=`grep -n \ - "ServerAlias localhost" %s' - '|cut -d ":" -f 1` && sed -i \ - "${dasboard_linenumber1}a ' - 'ServerAlias %s" %s' % (dashboard_file, ha_vip, - dashboard_file)] - if public_vip: - config_scripts.append('dasboard_linenumber2=`grep -n ' - '"ServerAlias localhost" %s|cut ' - '-d ":" -f 1` && sed -i ' - '"${dasboard_linenumber2}a ' - 'ServerAlias %s" %s' % - (dashboard_file, public_vip, - dashboard_file)) - - tecs_cmn.run_scrip(config_scripts, host_ip, password, - msg='Failed to config horizon on %s' % host_ip) - restart_http_scripts = ['systemctl daemon-reload &&' - 'systemctl restart httpd.service'] - try: - subprocess.check_output(restart_http_scripts, shell=True, - stderr=subprocess.STDOUT) - except: - return - - -class TECSInstallTask(Thread): - - """ - Class for install tecs bin. 
- """ - """ Definition for install states.""" - - def __init__(self, req, cluster_id): - super(TECSInstallTask, self).__init__() - self.req = req - self.cluster_id = cluster_id - self.progress = 0 - self.state = tecs_state['INIT'] - self.message = "" - self.tecs_config_file = '' - self.mgnt_ip_list = '' - self.install_log_fp = None - self.last_line_num = 0 - self.need_install = False - self.ping_times = 36 - self.log_file = "/var/log/daisy/tecs_%s_install.log" % self.cluster_id - self.dns_name_ip = [] - self.password = 'ossdbg1' - self.nodes_ips = {} - - def _check_install_log(self, tell_pos): - with open(self.log_file, "r") as tmp_fp: - tmp_fp.seek(tell_pos, os.SEEK_SET) - line_num = self.last_line_num - for lnum, lcontent in enumerate(tmp_fp, 1): - tell_pos = tmp_fp.tell() - line_num += 1 - LOG.debug("<<>>", line_num, lcontent) - if -1 != lcontent.find("Preparing servers"): - self.progress = 3 - - if -1 != lcontent.find("successfully"): - self.progress = 100 - self.state = tecs_state['ACTIVE'] - elif -1 != lcontent.find("Error") \ - or -1 != lcontent.find("ERROR") \ - or -1 != lcontent.find("error") \ - or -1 != lcontent.find("not found"): - self.state = tecs_state['INSTALL_FAILED'] - self.message = \ - "Tecs install error, see line %s in '%s'" % ( - line_num, self.log_file) - raise exception.InstallException(self.message) - self.last_line_num = line_num - return tell_pos - - def _calc_progress(self, path): - """ - Calculate the progress of installing bin. - :param path: directory contain ".pp" and ".log" files - :return: installing progress(between 1~100) - """ - ppcount = logcount = 0 - for file in os.listdir(path): - if file.endswith(".log"): - logcount += 1 - elif file.endswith(".pp"): - ppcount += 1 - - progress = 0 - if 0 != ppcount: - progress = (logcount * 100.00) / ppcount - return progress - - def _update_install_progress_to_db(self): - """ - Update progress of intallation to db. - :return: - """ - roles = daisy_cmn.get_cluster_roles_detail(self.req, self.cluster_id) - for role in roles: - if role['deployment_backend'] != daisy_cmn.tecs_backend_name: - continue - role_hosts = daisy_cmn.get_hosts_of_role(self.req, role['id']) - for role_host in role_hosts: - if role_host['status'] != tecs_state['ACTIVE']: - self.need_install = True - role_host['status'] = self.state - role_host['progress'] = self.progress - role_host['messages'] = self.message - daisy_cmn.update_role_host( - self.req, role_host['id'], role_host) - role['progress'] = self.progress - role['status'] = self.state - role['messages'] = self.message - daisy_cmn.update_role(self.req, role['id'], role) - - def _generate_tecs_config_file(self, cluster_id, tecs_config): - tecs_config_file = '' - if tecs_config: - cluster_conf_path = daisy_tecs_path + cluster_id - LOG.info(_("Generate tecs config...")) - config.update_tecs_config(tecs_config, cluster_conf_path) - tecs_config_file = cluster_conf_path + "/tecs.conf" - ha_config_file = cluster_conf_path + "/HA_1.conf" - tecs_cmn.mkdir_tecs_install() - cp_ha_conf = "\cp %s /home/tecs_install/" % ha_config_file - tecs_conf = "\cp %s /home/tecs_install/" % tecs_config_file - daisy_cmn.subprocess_call(cp_ha_conf) - daisy_cmn.subprocess_call(tecs_conf) - return tecs_config_file - - def run(self): - try: - start_time = time.time() - self._run() - except Exception as e: - self.state = tecs_state['INSTALL_FAILED'] - self.message = e.message - self._update_install_progress_to_db() - LOG.info(_("TECS version package installed failed for" - " cluster %s." 
% self.cluster_id)) - LOG.exception(e.message) - else: - if not self.need_install: - return - LOG.info(_("TECS version package installed completely for" - " cluster %s." % self.cluster_id)) - - LOG.info("Config provider ...") - tecs_cmn.inform_provider_cloud_state( - self.req.context, self.cluster_id, operation='add') - time_cost = str(round((time.time() - start_time) / 60, 2)) - LOG.info( - _("It totally takes %s min for installing tecs" % time_cost)) - - if self.dns_name_ip: - LOG.info("Config dns ...") - ha_vip = "" - public_vip = "" - compute_ip_domain = {} - for dns_dict in self.dns_name_ip: - domain_name = dns_dict.keys()[0] - domain_ip = dns_dict.values()[0] - if domain_name == "ha-vip": - ha_vip = domain_ip - if domain_name == "public-vip": - public_vip = domain_ip - if domain_ip in self.nodes_ips['computer']: - compute_ip_domain.update({domain_ip: domain_name}) - - revise_nova_config(self.nodes_ips['computer'], ha_vip, - public_vip, compute_ip_domain) - revise_horizon_config(self.nodes_ips['ha'], ha_vip, public_vip) - - LOG.info("Push configs for installing hosts ...") - config_backend_name = 'clushshell' - backend_driver = manager.configBackend(config_backend_name, - self.req) - - params = {'filters': {'cluster_id': self.cluster_id}} - nodes = registry.get_hosts_detail(self.req.context, **params) - push_nodes_id = [node['id'] for node in nodes - if node['status'] == 'with-role' and - node['role_status'] == tecs_state['INSTALLING']] - components_name = ['nova'] - backend_driver.push_config_by_hosts(push_nodes_id, - components_name) - - LOG.info("Config neutron ...") - result = config.get_conf( - self.tecs_config_file, - neutron_float_ip="CONFIG_NEUTRON_SERVER_HOST", - keystone_float_ip="CONFIG_KEYSTONE_HOST", - neutron_install_mode="CONFIG_NEUTRON_SERVER_INSTALL_MODE", - keystone_install_mode="CONFIG_KEYSTONE_INSTALL_MODE", - lb_float_ip="CONFIG_LB_HOST") - if (result.get('keystone_install_mode', None) == "LB" and - result.get('neutron_install_mode', None) == "LB"): - LOG.info(_("<<>>")) - time.sleep(20) - neutron(self.req, - result.get('lb_float_ip', None), - result.get('lb_float_ip', None), - self.cluster_id) - else: - LOG.info(_("<<>>")) - time.sleep(20) - neutron(self.req, - result.get('neutron_float_ip', None), - result.get('keystone_float_ip', None), - self.cluster_id) - - self.progress = 100 - self.state = tecs_state['ACTIVE'] - self.message = "Tecs installed successfully" - LOG.info(_("TECS installed successfully for cluster %s." - % self.cluster_id)) - finally: - if not self.need_install: - return - self._update_install_progress_to_db() - if self.install_log_fp: - self.install_log_fp.close() - - def _run(self): - """ - Execute the install file (.bin) in sync mode. - :return: - """ - - def executor(**params): - # if the subprocess failed, we need to break - if os.path.exists(self.log_file): - params['tell_pos'] = self._check_install_log( - params.get('tell_pos', 0)) - LOG.debug(_("<<>>")) - if 100 == self.progress: - return params - if 3 == self.progress: - self._update_install_progress_to_db() - # waiting for 'progress_log_location' file to exist - if not params.get("if_progress_file_read", None): - if not os.path.exists(self.progress_log_location): - params['if_progress_file_read'] = False - return params - else: - with open(self.progress_log_location, "r") as fp: - line = fp.readline() - self.progress_logs_path = line.split( - '\n')[0] + "/manifests" - LOG.info(_("TECS installation log path: %s."
- % self.progress_logs_path)) - params['if_progress_file_read'] = True - - # waiting for 'self.progress_logs_path' file exist - if not os.path.exists(self.progress_logs_path): - return params - - LOG.debug(_("<<>>")) - - # cacl progress & sync to db - progress = self._calc_progress(self.progress_logs_path) - if progress == 100: - self.progress = 100 - elif self.progress != progress and progress >= 3: - self.progress = progress - self.state = tecs_state['INSTALLING'] - self._update_install_progress_to_db() - - return params - - tecs_config = get_cluster_tecs_config(self.req, self.cluster_id) - name_ip_list, self.nodes_ips, ha_nodes_ip =\ - get_host_name_and_mgnt_ip(tecs_config) - - self.mgnt_ip_list = (self.nodes_ips['ha'] | - self.nodes_ips['lb'] | - self.nodes_ips['computer']) - - # after os is installed successfully, if ping all role hosts - # management ip successfully, begin to install TECS - unreached_hosts = daisy_cmn.check_ping_hosts(self.mgnt_ip_list, - self.ping_times) - if unreached_hosts: - self.message =\ - "ping hosts %s failed" % ','.join(unreached_hosts) - raise exception.InstallException(self.message) - else: - # os maybe not reboot completely, wait for 10s. - time.sleep(10) - - # delete daisy server known_hosts file to avoid - # ssh command failed because of incorrect host key. - daisy_cmn.subprocess_call('rm -rf /root/.ssh/known_hosts') - - self.progress = 0 - self.message = "Preparing for TECS installation" - self._update_install_progress_to_db() - if not self.need_install: - LOG.info(_("No host in cluster %s need to install tecs." - % self.cluster_id)) - return - - LOG.info(_("Begin to trust me for all nodes with role...")) - root_passwd = 'ossdbg1' - - daisy_cmn.trust_me(self.mgnt_ip_list, root_passwd) - tecs_cmn.mkdir_tecs_install(self.mgnt_ip_list) - - if tecs_config['OTHER']['cluster_data']['use_dns']: - self.dns_name_ip =\ - replace_ip_with_domain_name(self.req, tecs_config) - storage_ip_list = tecs_cmn.get_storage_name_ip_dict( - self.req, self.cluster_id, 'STORAGE') - - self.dns_name_ip.extend(storage_ip_list) - tecs_config['OTHER'].update({'dns_config': self.dns_name_ip}) - - config_dnsmasq_server(ha_nodes_ip, self.dns_name_ip) - config_dnsmasq_client(self.mgnt_ip_list, ha_nodes_ip) - config_nodes_hosts(self.mgnt_ip_list, self.dns_name_ip) - host_domain = [name_ip.keys()[0] for name_ip in self.dns_name_ip - if name_ip.values()[0] in self.mgnt_ip_list] - - unreached_hosts = daisy_cmn.check_ping_hosts(host_domain, - self.ping_times) - if unreached_hosts: - self.message = "ping hosts %s failed after DNS\ - configuration" % ','.join(unreached_hosts) - raise exception.InstallException(self.message) - else: - config_nodes_hosts(self.mgnt_ip_list, name_ip_list) - # generate tecs config must be after ping check - self.tecs_config_file =\ - self._generate_tecs_config_file(self.cluster_id, - tecs_config) - - # install network-configuration-1.1.1-15.x86_64.rpm - if self.mgnt_ip_list: - for mgnt_ip in self.mgnt_ip_list: - LOG.info(_("Begin to install network-configuration\ - on %s" % mgnt_ip)) - tecs_cmn.TecsShellExector(mgnt_ip, 'install_rpm') - # network-configuration will restart network, - # wait until ping test successfully - time.sleep(10) - unreached_hosts = daisy_cmn.check_ping_hosts(self.mgnt_ip_list, - self.ping_times) - if unreached_hosts: - self.message = "ping hosts %s failed after network\ - configuration" % ','.join(unreached_hosts) - raise exception.InstallException(self.message) - - (share_disk_info, volume_disk_info, share_cluster_disk_info) =\ - 
disk_array.get_disk_array_info(self.req, self.cluster_id) - - if volume_disk_info: - LOG.info(_("Begin to config multipth on compute nodes...")) - disk_array.config_compute_multipath(self.nodes_ips['computer']) - - if share_disk_info or share_cluster_disk_info: - LOG.info(_("Begin to config multipth on HA nodes...")) - disk_array.config_compute_multipath(self.nodes_ips['ha']) - LOG.info(_("Begin to config Disk Array on ha nodes...")) - array_nodes_addr =\ - tecs_cmn.get_disk_array_nodes_addr(self.req, self.cluster_id) - disk_array.config_ha_share_disk(share_disk_info, - array_nodes_addr['ha'], - share_cluster_disk_info) - - # check and get TECS version - tecs_version_pkg_file =\ - tecs_cmn.check_and_get_tecs_version(daisy_tecs_path) - if not tecs_version_pkg_file: - self.state = tecs_state['INSTALL_FAILED'] - self.message =\ - "TECS version file not found in %s" % daisy_tecs_path - raise exception.NotFound(message=self.message) - - # use pattern 'tecs_%s_install' to distinguish - # multi clusters installation - LOG.info(_("Open log file for TECS installation.")) - self.install_log_fp = open(self.log_file, "w+") - - # delete cluster_id file before installing, - # in case getting old log path - self.progress_log_location =\ - "/var/tmp/packstack/%s" % self.cluster_id - if os.path.exists(self.progress_log_location): - os.remove(self.progress_log_location) - - install_cmd = "sudo %s conf_file %s" % (tecs_version_pkg_file, - self.tecs_config_file) - LOG.info(_("Begin to install TECS in cluster %s." % self.cluster_id)) - subprocess.Popen(install_cmd, - shell=True, - stdout=self.install_log_fp, - stderr=self.install_log_fp) - - self.progress = 1 - self.state = tecs_state['INSTALLING'] - self.message = "TECS installing" - self._update_install_progress_to_db() - # if clush_bin is not terminate - # while not clush_bin.returncode: - params = {} # executor params - execute_times = 0 # executor run times - while True: - time.sleep(5) - if self.progress == 100: - if volume_disk_info and self.nodes_ips['ha']: - LOG.info(_("Begin to config cinder volume...")) - disk_array.config_ha_cinder_volume(volume_disk_info, - self.nodes_ips['ha']) - break - elif execute_times >= 1440: - self.state = tecs_state['INSTALL_FAILED'] - self.message = "TECS install timeout for 2 hours" - raise exception.InstallTimeoutException( - cluster_id=self.cluster_id) - params = executor( - # just read cluster_id file once in 'while' - if_progress_file_read=params.get( - "if_progress_file_read", False), - # current fp location of tecs_install.log - tell_pos=params.get("tell_pos", 0)) - - # get clush_bin.returncode - # clush_bin.poll() - execute_times += 1 diff --git a/code/daisy/daisy/api/backends/tecs/uninstall.py b/code/daisy/daisy/api/backends/tecs/uninstall.py deleted file mode 100755 index c555d8d0..00000000 --- a/code/daisy/daisy/api/backends/tecs/uninstall.py +++ /dev/null @@ -1,150 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
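One arithmetic note on the TECSInstallTask._run loop that ends above: it sleeps 5 seconds per iteration and aborts once execute_times reaches 1440, i.e. 1440 * 5 s = 7200 s, which is the two-hour timeout named in its error message. The same poll-until-done pattern in isolation; the helper and its check callback are illustrative, not from the tree:

    import time


    def poll_until(check, interval=5, max_polls=1440):
        # Re-check progress every `interval` seconds and give up after
        # max_polls * interval seconds (1440 * 5 s = 2 hours here).
        for _ in range(max_polls):
            if check():  # True once installation reports 100% progress
                return True
            time.sleep(interval)
        return False  # the real task raises InstallTimeoutException instead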
- -""" -/hosts endpoint for Daisy v1 API -""" - -import subprocess - -from oslo_log import log as logging -from daisy import i18n -import daisy.api.backends.common as daisy_cmn -import daisy.api.backends.tecs.common as tecs_cmn - -LOG = logging.getLogger(__name__) -_ = i18n._ -_LE = i18n._LE -_LI = i18n._LI -_LW = i18n._LW - -tecs_state = tecs_cmn.TECS_STATE - - -def update_progress_to_db(req, role_id_list, status, hosts_list, host_ip=None): - """ - Write uninstall progress and status to db, - we use global lock object 'uninstall_mutex' - to make sure this function is thread safety. - :param req: http req. - :param role_id_list: Column neeb be update in role table. - :param status: Uninstall status. - :return: - """ - for role_id in role_id_list: - role_hosts = daisy_cmn.get_hosts_of_role(req, role_id) - for host_id_ip in hosts_list: - host_ip_tmp = host_id_ip.values()[0] - host_id_tmp = host_id_ip.keys()[0] - if host_ip: - for role_host in role_hosts: - if (host_ip_tmp == host_ip and - role_host['host_id'] == host_id_tmp): - role_host_meta = {} - if 0 == cmp(status, tecs_state['UNINSTALLING']): - role_host_meta['progress'] = 10 - role_host_meta['messages'] = 'TECS uninstalling' - if 0 == cmp(status, tecs_state['UNINSTALL_FAILED']): - role_host_meta[ - 'messages'] = 'TECS uninstalled failed' - elif 0 == cmp(status, tecs_state['INIT']): - role_host_meta['progress'] = 100 - role_host_meta[ - 'messages'] = 'TECS uninstalled successfully' - if role_host_meta: - role_host_meta['status'] = status - daisy_cmn.update_role_host(req, role_host['id'], - role_host_meta) - else: - role = {} - if 0 == cmp(status, tecs_state['UNINSTALLING']): - for role_host in role_hosts: - role_host_meta = {} - role_host_meta['status'] = status - role_host_meta['progress'] = 0 - daisy_cmn.update_role_host(req, - role_host['id'], - role_host_meta) - role['progress'] = 0 - role['messages'] = 'TECS uninstalling' - if 0 == cmp(status, tecs_state['UNINSTALL_FAILED']): - role['messages'] = 'TECS uninstalled failed' - elif 0 == cmp(status, tecs_state['INIT']): - role['progress'] = 100 - role['messages'] = 'TECS uninstalled successfully' - if role: - role['status'] = status - daisy_cmn.update_role(req, role_id, role) - - -def _thread_bin(req, host_ip, role_id_list, hosts_list): - # uninstall network-configuration-1.1.1-15.x86_64.rpm - update_progress_to_db( - req, role_id_list, tecs_state['UNINSTALLING'], hosts_list, host_ip) - tecs_cmn.TecsShellExector(host_ip, 'uninstall_rpm') - - cmd = 'mkdir -p /var/log/daisy/daisy_uninstall/' - daisy_cmn.subprocess_call(cmd) - password = "ossdbg1" - var_log_path = "/var/log/daisy/daisy_uninstall/\ - %s_uninstall_tecs.log" % host_ip - with open(var_log_path, "w+") as fp: - cmd = '/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, password) - daisy_cmn.subprocess_call(cmd, fp) - cmd = 'clush -S -b -w %s "rm -rf /home/daisy_uninstall"' % (host_ip,) - daisy_cmn.subprocess_call(cmd, fp) - cmd = 'clush -S -w %s "mkdir -p /home/daisy_uninstall"' % (host_ip,) - daisy_cmn.subprocess_call(cmd, fp) - - try: - subprocess.check_output( - 'clush -S -w %s -c /var/lib/daisy/tecs/ZXTECS*.bin \ - --dest=/home/daisy_uninstall' % ( - host_ip,), - shell=True, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - update_progress_to_db( - req, role_id_list, tecs_state[ - 'UNINSTALL_FAILED'], hosts_list, host_ip) - LOG.error(_("scp TECS bin for %s failed!" 
% host_ip)) - fp.write(e.output.strip()) - - cmd = 'clush -S -w %s "chmod 777 /home/daisy_uninstall/*"' % (host_ip,) - daisy_cmn.subprocess_call(cmd, fp) - - try: - exc_result = subprocess.check_output( - 'clush -S -w %s /home/daisy_uninstall/ZXTECS*.bin clean' % ( - host_ip,), - shell=True, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - update_progress_to_db( - req, role_id_list, tecs_state[ - 'UNINSTALL_FAILED'], hosts_list, host_ip) - LOG.error(_("Uninstall TECS for %s failed!" % host_ip)) - fp.write(e.output.strip()) - else: - update_progress_to_db(req, role_id_list, tecs_state['INIT'], - hosts_list, host_ip) - LOG.info(_("Uninstall TECS for %s successfully!" % host_ip)) - fp.write(exc_result) -# this will be raise raise all the exceptions of the thread to log file - - -def thread_bin(req, host_ip, role_id_list, hosts_list): - try: - _thread_bin(req, host_ip, role_id_list, hosts_list) - except Exception as e: - LOG.exception(e.message) diff --git a/code/daisy/daisy/api/backends/tecs/upgrade.py b/code/daisy/daisy/api/backends/tecs/upgrade.py deleted file mode 100755 index 1bf5b60b..00000000 --- a/code/daisy/daisy/api/backends/tecs/upgrade.py +++ /dev/null @@ -1,145 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -/update endpoint for Daisy v1 API -""" - -import subprocess - -from oslo_log import log as logging -from daisy import i18n -import daisy.api.backends.common as daisy_cmn -import daisy.api.backends.tecs.common as tecs_cmn - - -LOG = logging.getLogger(__name__) -_ = i18n._ -_LE = i18n._LE -_LI = i18n._LI -_LW = i18n._LW - -tecs_state = tecs_cmn.TECS_STATE - - -def update_progress_to_db(req, role_id_list, status, hosts_list, host_ip=None): - """ - Write update progress and status to db, - to make sure this function is thread safety. - :param req: http req. - :param role_id_list: Column neeb be update in role table. - :param status: Update status. 
- :return: - """ - for role_id in role_id_list: - role_hosts = daisy_cmn.get_hosts_of_role(req, role_id) - for host_id_ip in hosts_list: - host_ip_tmp = host_id_ip.values()[0] - host_id_tmp = host_id_ip.keys()[0] - if host_ip: - for role_host in role_hosts: - if (host_ip_tmp == host_ip and - role_host['host_id'] == host_id_tmp): - role_host_meta = {} - if 0 == cmp(status, tecs_state['UPDATING']): - role_host_meta['progress'] = 10 - role_host_meta['messages'] = 'TECS upgrading' - if 0 == cmp(status, tecs_state['UPDATE_FAILED']): - role_host_meta['messages'] = 'TECS upgraded failed' - elif 0 == cmp(status, tecs_state['ACTIVE']): - role_host_meta['progress'] = 100 - role_host_meta[ - 'messages'] = 'TECS upgraded successfully' - if role_host_meta: - role_host_meta['status'] = status - daisy_cmn.update_role_host(req, - role_host['id'], - role_host_meta) - else: - role = {} - if 0 == cmp(status, tecs_state['UPDATING']): - for role_host in role_hosts: - if role_host['status'] == tecs_state['INSTALL_FAILED']: - continue - role_host_meta = {} - role_host_meta['status'] = status - role_host_meta['progress'] = 0 - role_host_meta['messages'] = 'TECS upgrading' - daisy_cmn.update_role_host(req, - role_host['id'], - role_host_meta) - role['progress'] = 0 - role['messages'] = 'TECS upgrading' - if 0 == cmp(status, tecs_state['UPDATE_FAILED']): - role['messages'] = 'TECS upgraded failed' - elif 0 == cmp(status, tecs_state['ACTIVE']): - role['progress'] = 100 - role['messages'] = 'TECS upgraded successfully' - if role: - role['status'] = status - daisy_cmn.update_role(req, role_id, role) - - -def thread_bin(req, role_id_list, host_ip, hosts_list): - # update network-configuration-1.1.1-15.x86_64.rpm - update_progress_to_db( - req, role_id_list, tecs_state['UPDATING'], hosts_list, host_ip) - cmd = 'mkdir -p /var/log/daisy/daisy_update/' - daisy_cmn.subprocess_call(cmd) - password = "ossdbg1" - var_log_path = "/var/log/daisy/daisy_update/%s_update_tecs.log" % host_ip - with open(var_log_path, "w+") as fp: - cmd = '/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, password) - daisy_cmn.subprocess_call(cmd, fp) - cmd = 'clush -S -w %s "mkdir -p /home/tecs_update/"' % (host_ip,) - daisy_cmn.subprocess_call(cmd, fp) - cmd = 'clush -S -b -w %s "rm -rf /home/tecs_update/ZXTECS*.bin"' % ( - host_ip,) - daisy_cmn.subprocess_call(cmd, fp) - tecs_cmn.TecsShellExector(host_ip, 'update_rpm') - try: - subprocess.check_output( - 'clush -S -w %s -c /var/lib/daisy/tecs/ZXTECS*.bin \ - --dest=/home/tecs_update' % ( - host_ip,), - shell=True, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - update_progress_to_db( - req, role_id_list, tecs_state[ - 'UPDATE_FAILED'], hosts_list, host_ip) - LOG.error(_("scp TECS bin for %s failed!" % host_ip)) - fp.write(e.output.strip()) - return 1 - - cmd = 'clush -S -w %s "chmod 777 /home/tecs_update/*"' % (host_ip,) - daisy_cmn.subprocess_call(cmd, fp) - - try: - exc_result = subprocess.check_output( - 'clush -S -w %s "/home/tecs_update/ZXTECS*.bin upgrade"' % ( - host_ip,), - shell=True, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - update_progress_to_db( - req, role_id_list, tecs_state[ - 'UPDATE_FAILED'], hosts_list, host_ip) - LOG.error(_("Update TECS for %s failed!" 
% host_ip)) - fp.write(e.output.strip()) - return 2 - else: - update_progress_to_db( - req, role_id_list, tecs_state['ACTIVE'], hosts_list, host_ip) - fp.write(exc_result) - return 0 diff --git a/code/daisy/daisy/api/backends/tecs/write_configs.py b/code/daisy/daisy/api/backends/tecs/write_configs.py deleted file mode 100755 index 28b44952..00000000 --- a/code/daisy/daisy/api/backends/tecs/write_configs.py +++ /dev/null @@ -1,142 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -/install endpoint for tecs API -""" -import daisy.registry.client.v1.api as registry -import daisy.api.backends.common as daisy_cmn -from daisy.common import utils - - -def _write_role_configs_to_db(req, cluster_id, role_name, configs): - config_meta = {'cluster': cluster_id, - 'role': role_name, - 'config': configs} - registry.config_interface_metadata(req.context, - config_meta) - - -def _write_host_configs_to_db(req, host_id, configs): - config_meta = {'host_id': host_id, - 'config': configs} - registry.config_interface_metadata(req.context, - config_meta) - - -def _get_config_item(file, section, key, value, description): - return {'file-name': file, - 'section': section, - 'key': key, - 'value': value, - 'description': description} - - -def _add_configs_for_nova(req, host_detail): - config_file = '/etc/nova/nova.conf' - default_section = 'DEFAULT' - - key_name = 'vcpu_pin_set' - key_value = host_detail.get(key_name) - config_items = [] - if not key_value: - key_value = host_detail.get('isolcpus') - - nova_key_name = key_name - description = 'vcpu pin set for all vm' - item = _get_config_item(config_file, - default_section, - nova_key_name, - key_value, - description) - config_items.append(item) - - key_name = 'dvs_high_cpuset' - key_value = host_detail.get(key_name) - - nova_key_name = 'dvs_high_cpu_set' - description = 'vcpu pin set for high-performance dvs vm' - item = _get_config_item(config_file, - default_section, - nova_key_name, - key_value, - description) - config_items.append(item) - - numa_cpus = utils.get_numa_node_cpus(host_detail.get('cpu', {})) - numa_nodes = utils.get_numa_node_from_cpus(numa_cpus, key_value) - if numa_nodes: - libvirt_section = 'libvirt' - nova_key_name = 'reserved_huge_pages' - # only support one NUMA node for DVS now - key_value = 'node:%s,size:1048576,count:4' % numa_nodes[0] - description = 'reserved huges for DVS service '\ - 'on high NUMA node' - config_items.append({'file-name': config_file, - 'key': nova_key_name, - 'section': libvirt_section, - 'value': key_value, - 'description': description}) - - key_name = 'pci_high_cpuset' - pci_key_value = host_detail.get(key_name) - - nova_key_name = 'vsg_card_cpu_set' - description = 'vcpu pin set for high-performance CLC card vm' - item = _get_config_item(config_file, - default_section, - nova_key_name, - pci_key_value, - description) - config_items.append(item) - if pci_key_value: - nova_key_name = 'default_ephemeral_format' - description = 'config for CLC 
card' - key_value = 'ext3' - item = _get_config_item(config_file, - default_section, - nova_key_name, - key_value, - description) - config_items.append(item) - - nova_key_name = 'pci_passthrough_whitelist' - description = 'config for CLC card' - key_value = '[{"vendor_id": "8086","product_id": "0435"}]' - item = _get_config_item(config_file, - default_section, - nova_key_name, - key_value, - description) - config_items.append(item) - - _write_host_configs_to_db(req, - host_detail['id'], - config_items) - - -def update_configset(req, cluster_id): - roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id) - for role in roles: - # now only computer has configs - if role['name'] != 'COMPUTER': - continue - role_meta = {'config_set_update_progress': 0} - daisy_cmn.update_role(req, role['id'], role_meta) - - role_hosts = daisy_cmn.get_hosts_of_role(req, role['id']) - for host in role_hosts: - host_detail = daisy_cmn.get_host_detail(req, host['host_id']) - _add_configs_for_nova(req, host_detail) diff --git a/code/daisy/daisy/api/backends/zenic/__init__.py b/code/daisy/daisy/api/backends/zenic/__init__.py deleted file mode 100755 index e69de29b..00000000 diff --git a/code/daisy/daisy/api/backends/zenic/api.py b/code/daisy/daisy/api/backends/zenic/api.py deleted file mode 100755 index df71c11e..00000000 --- a/code/daisy/daisy/api/backends/zenic/api.py +++ /dev/null @@ -1,186 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -/install endpoint for zenic API -""" -import time - -from oslo_log import log as logging - -import threading -from daisy import i18n - -from daisy.common import exception -from daisy.api.backends import driver -import daisy.api.backends.common as daisy_cmn -import daisy.api.backends.zenic.common as zenic_cmn -import daisy.api.backends.zenic.install as instl -import daisy.api.backends.zenic.uninstall as unstl -import daisy.api.backends.zenic.upgrade as upgrd - - -LOG = logging.getLogger(__name__) -_ = i18n._ -_LE = i18n._LE -_LI = i18n._LI -_LW = i18n._LW - -zenic_state = zenic_cmn.ZENIC_STATE - - -class API(driver.DeploymentDriver): - - def __init__(self): - super(API, self).__init__() - return - - def install(self, req, cluster_id): - """ - Install zenic to a cluster. - - param req: The WSGI/Webob Request object - cluster_id:cluster id - """ - - # instl.pxe_server_build(req, install_meta) - # get hosts config which need to install OS - # hosts_need_os = instl.get_cluster_hosts_config(req, cluster_id) - # if have hosts need to install os, ZENIC installataion executed - # in OSInstallTask - # if hosts_need_os: - # os_install_obj = instl.OSInstallTask(req, cluster_id, hosts_need_os) - # os_install_thread = Thread(target=os_install_obj.run) - # os_install_thread.start() - # else: - LOG.info( - _("No host need to install os, begin install ZENIC for cluster %s." 
- % cluster_id)) - zenic_install_task = instl.ZENICInstallTask(req, cluster_id) - zenic_install_task.start() - - LOG.info((_("begin install zenic, please waiting...."))) - time.sleep(5) - LOG.info((_("install zenic successfully"))) - - def uninstall(self, req, cluster_id): - """ - Uninstall ZENIC to a cluster. - - :param req: The WSGI/Webob Request object - - :raises HTTPBadRequest if x-install-cluster is missing - """ - - (role_id_list, hosts_list) = zenic_cmn.get_roles_and_hosts_list( - req, cluster_id) - if role_id_list: - if not hosts_list: - msg = _("there is no host in cluster %s") % cluster_id - raise exception.ThreadBinException(msg) - - unstl.update_progress_to_db( - req, role_id_list, zenic_state['UNINSTALLING'], 0.0) - uninstall_progress_percentage =\ - round(1 * 1.0 / len(hosts_list), 2) * 100 - - threads = [] - for host in hosts_list: - t = threading.Thread(target=unstl.thread_bin, args=( - req, host, role_id_list, uninstall_progress_percentage)) - t.setDaemon(True) - t.start() - threads.append(t) - LOG.info(_("uninstall threads have started, please waiting....")) - - try: - for t in threads: - t.join() - except: - LOG.warn(_("Join uninstall thread %s failed!" % t)) - else: - uninstall_failed_flag = False - for role_id in role_id_list: - role = daisy_cmn.get_role_detail(req, role_id) - if role['progress'] == 100: - unstl.update_progress_to_db( - req, role_id_list, zenic_state['UNINSTALL_FAILED']) - uninstall_failed_flag = True - break - if role['status'] == zenic_state['UNINSTALL_FAILED']: - uninstall_failed_flag = True - break - if not uninstall_failed_flag: - LOG.info( - _("all uninstall threads have done,\ - set all roles status to 'init'!")) - unstl.update_progress_to_db( - req, role_id_list, zenic_state['INIT']) - - LOG.info((_("begin uninstall zenic, please waiting...."))) - time.sleep(5) - LOG.info((_("uninstall zenic successfully"))) - - def upgrade(self, req, cluster_id): - """ - update zenic to a cluster. - - :param req: The WSGI/Webob Request object - - :raises HTTPBadRequest if x-install-cluster is missing - - """ - (role_id_list, hosts_list) = zenic_cmn.get_roles_and_hosts_list( - req, cluster_id) - if not hosts_list: - msg = _("there is no host in cluster %s") % cluster_id - raise exception.ThreadBinException(msg) - - upgrd.update_progress_to_db( - req, role_id_list, zenic_state['UPDATING'], 0.0) - update_progress_percentage = round(1 * 1.0 / len(hosts_list), 2) * 100 - - threads = [] - for host in hosts_list: - t = threading.Thread(target=upgrd.thread_bin, args=( - req, host, role_id_list, update_progress_percentage)) - t.setDaemon(True) - t.start() - threads.append(t) - LOG.info(_("upgrade threads have started, please waiting....")) - - try: - for t in threads: - t.join() - except: - LOG.warn(_("Join upgrade thread %s failed!" 
% t)) - else: - update_failed_flag = False - for role_id in role_id_list: - role = daisy_cmn.get_role_detail(req, role_id) - if role['progress'] == 0: - upgrd.update_progress_to_db( - req, role_id_list, zenic_state['UPDATE_FAILED']) - update_failed_flag = True - break - if role['status'] == zenic_state['UPDATE_FAILED']: - update_failed_flag = True - break - if not update_failed_flag: - LOG.info( - _("all update threads have done, \ - set all roles status to 'active'!")) - upgrd.update_progress_to_db( - req, role_id_list, zenic_state['ACTIVE']) diff --git a/code/daisy/daisy/api/backends/zenic/common.py b/code/daisy/daisy/api/backends/zenic/common.py deleted file mode 100755 index 31b44165..00000000 --- a/code/daisy/daisy/api/backends/zenic/common.py +++ /dev/null @@ -1,319 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -/install endpoint for zenic API -""" -import os -import copy -import subprocess -from oslo_log import log as logging -from webob.exc import HTTPBadRequest - -from daisy import i18n - -from daisy.common import exception -import daisy.registry.client.v1.api as registry -import daisy.api.backends.common as daisy_cmn - - -LOG = logging.getLogger(__name__) -_ = i18n._ -_LE = i18n._LE -_LI = i18n._LI -_LW = i18n._LW - -daisy_zenic_path = '/var/lib/daisy/zenic/' -ZENIC_STATE = { - 'INIT': 'init', - 'INSTALLING': 'installing', - 'ACTIVE': 'active', - 'INSTALL_FAILED': 'install-failed', - 'UNINSTALLING': 'uninstalling', - 'UNINSTALL_FAILED': 'uninstall-failed', - 'UPDATING': 'updating', - 'UPDATE_FAILED': 'update-failed', -} - - -def get_cluster_hosts(req, cluster_id): - try: - cluster_hosts = registry.get_cluster_hosts(req.context, cluster_id) - except exception.Invalid as e: - raise HTTPBadRequest(explanation=e.msg, request=req) - return cluster_hosts - - -def get_host_detail(req, host_id): - try: - host_detail = registry.get_host_metadata(req.context, host_id) - except exception.Invalid as e: - raise HTTPBadRequest(explanation=e.msg, request=req) - return host_detail - - -def get_roles_detail(req): - try: - roles = registry.get_roles_detail(req.context) - except exception.Invalid as e: - raise HTTPBadRequest(explanation=e.msg, request=req) - return roles - - -def get_hosts_of_role(req, role_id): - try: - hosts = registry.get_role_host_metadata(req.context, role_id) - except exception.Invalid as e: - raise HTTPBadRequest(explanation=e.msg, request=req) - return hosts - - -def get_role_detail(req, role_id): - try: - role = registry.get_role_metadata(req.context, role_id) - except exception.Invalid as e: - raise HTTPBadRequest(explanation=e.msg, request=req) - return role - - -def update_role(req, role_id, role_meta): - try: - registry.update_role_metadata(req.context, role_id, role_meta) - except exception.Invalid as e: - raise HTTPBadRequest(explanation=e.msg, request=req) - - -def update_role_host(req, role_id, role_host): - try: - registry.update_role_host_metadata(req.context, role_id, role_host) - except 
exception.Invalid as e: - raise HTTPBadRequest(explanation=e.msg, request=req) - - -def delete_role_hosts(req, role_id): - try: - registry.delete_role_host_metadata(req.context, role_id) - except exception.Invalid as e: - raise HTTPBadRequest(explanation=e.msg, request=req) - - -def _get_cluster_network(cluster_networks, network_type): - network = [cn for cn in cluster_networks - if cn['name'] in network_type] - if not network or not network[0]: - msg = "network %s is not exist" % (network_type) - raise exception.InvalidNetworkConfig(msg) - else: - return network[0] - - -def get_host_interface_by_network(host_detail, network_type): - host_detail_info = copy.deepcopy(host_detail) - interface_list = [hi for hi in host_detail_info['interfaces'] - for assigned_network in hi['assigned_networks'] - if assigned_network and - network_type == assigned_network['name']] - interface = {} - if interface_list: - interface = interface_list[0] - - if not interface: - msg = "network %s of host %s is not exist" % ( - network_type, host_detail_info['id']) - raise exception.InvalidNetworkConfig(msg) - - return interface - - -def get_host_network_ip(req, host_detail, cluster_networks, network_type): - interface_network_ip = '' - host_interface = get_host_interface_by_network(host_detail, network_type) - if host_interface: - network = _get_cluster_network(cluster_networks, network_type) - assigned_network = daisy_cmn.get_assigned_network(req, - host_interface['id'], - network['id']) - interface_network_ip = assigned_network['ip'] - - if not interface_network_ip: - msg = "%s network ip of host %s can't be empty" % ( - network_type, host_detail['id']) - raise exception.InvalidNetworkConfig(msg) - return interface_network_ip - - -def get_deploy_node_cfg(req, host_detail, cluster_networks): - host_deploy_network = get_host_interface_by_network( - host_detail, 'DEPLOYMENT') - host_deploy_ip = get_host_network_ip( - req, host_detail, cluster_networks, 'DEPLOYMENT') - if not host_deploy_ip: - msg = "deployment ip of host %s can't be empty" % host_detail['id'] - raise exception.InvalidNetworkConfig(msg) - host_deploy_macname = host_deploy_network['name'] - if not host_deploy_macname: - msg = "deployment macname of host %s can't be empty" % host_detail[ - 'id'] - raise exception.InvalidNetworkConfig(msg) - - host_mgt_ip = get_host_network_ip( - req, host_detail, cluster_networks, 'MANAGEMENT') - if not host_mgt_ip: - msg = "management ip of host %s can't be empty" % host_detail['id'] - raise exception.InvalidNetworkConfig(msg) - - memmode = 'tiny' - host_memory = 0 - - # if host_detail.has_key('memory'): - if 'memory' in host_detail: - host_memory = ( - int(host_detail['memory'][ - 'total'].strip().split()[0])) / (1024 * 1024) - - if host_memory < 8: - memmode = 'tiny' - elif host_memory < 16: - memmode = 'small' - elif host_memory < 32: - memmode = 'medium' - else: - memmode = 'large' - - deploy_node_cfg = {} - deploy_node_cfg.update({'hostid': host_detail['id']}) - deploy_node_cfg.update({'hostname': host_detail['name']}) - deploy_node_cfg.update({'nodeip': host_deploy_ip}) - deploy_node_cfg.update({'MacName': host_deploy_macname}) - deploy_node_cfg.update({'memmode': memmode}) - deploy_node_cfg.update({'mgtip': host_mgt_ip}) - return deploy_node_cfg - - -def get_roles_and_hosts_list(req, cluster_id): - roles_id_list = set() - hosts_id_list = set() - hosts_list = [] - - cluster_networks = daisy_cmn.get_cluster_networks_detail(req, cluster_id) - roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id) - for role in 
roles:
-        if role['deployment_backend'] != daisy_cmn.zenic_backend_name:
-            continue
-        role_hosts = daisy_cmn.get_hosts_of_role(req, role['id'])
-        if role_hosts:
-            for role_host in role_hosts:
-                if role_host['host_id'] not in hosts_id_list:
-                    host = daisy_cmn.get_host_detail(req, role_host['host_id'])
-                    host_ip = get_host_network_ip(
-                        req, host, cluster_networks, 'MANAGEMENT')
-                    hosts_id_list.add(host['id'])
-
-                    host_cfg = {}
-                    host_cfg['mgtip'] = host_ip
-                    host_cfg['rootpwd'] = host['root_pwd']
-                    hosts_list.append(host_cfg)
-
-            roles_id_list.add(role['id'])
-
-    return (roles_id_list, hosts_list)
-
-
-def check_and_get_zenic_version(daisy_zenic_pkg_path):
-    zenic_version_pkg_file = ""
-    zenic_version_pkg_name = ""
-    get_zenic_version_pkg = "ls %s| grep ^ZENIC.*\.zip$" % daisy_zenic_pkg_path
-    obj = subprocess.Popen(get_zenic_version_pkg,
-                           shell=True,
-                           stdout=subprocess.PIPE,
-                           stderr=subprocess.PIPE)
-    (stdoutput, erroutput) = obj.communicate()
-    if stdoutput:
-        zenic_version_pkg_name = stdoutput.split('\n')[0]
-        zenic_version_pkg_file = daisy_zenic_pkg_path + zenic_version_pkg_name
-        chmod_for_zenic_version = 'chmod +x %s' % zenic_version_pkg_file
-        daisy_cmn.subprocess_call(chmod_for_zenic_version)
-    return (zenic_version_pkg_file, zenic_version_pkg_name)
-
-
-class ZenicShellExector():
-
-    """
-    Config task run before installing the zenic bin.
-    """
-
-    def __init__(self, mgt_ip, task_type, params={}):
-        self.task_type = task_type
-        self.mgt_ip = mgt_ip
-        self.params = params
-        self.clush_cmd = ""
-        self.PKG_NAME = self.params['pkg_name']
-        self.PKG_PATH = daisy_zenic_path + self.PKG_NAME
-        self.CFG_PATH = daisy_zenic_path + mgt_ip + "_zenic.conf"
-        self.oper_type = {
-            'install': self._install_pkg
-        }
-        self.oper_shell = {
-            'CMD_SSHPASS_PRE': "sshpass -p ossdbg1 %(ssh_ip)s %(cmd)s",
-            'CMD_CFG_SCP': "scp %(path)s root@%(ssh_ip)s:/etc/zenic/config" %
-            {'path': self.CFG_PATH, 'ssh_ip': mgt_ip},
-            'CMD_PKG_UNZIP': "unzip /home/workspace/%(pkg_name)s \
-                -d /home/workspace/PKG" % {'pkg_name': self.PKG_NAME},
-            'CMD_PKG_SCP': "scp %(path)s root@%(ssh_ip)s:/home/workspace/" %
-            {'path': self.PKG_PATH, 'ssh_ip': mgt_ip}
-        }
-
-        self._execute()
-
-    def _install_pkg(self):
-        if not os.path.exists(self.CFG_PATH):
-            LOG.error(_("<<<config file %s does not exist>>>" % self.CFG_PATH))
-            return
-
-        if not os.path.exists(self.PKG_PATH):
-            LOG.error(_("<<<package %s does not exist>>>" % self.PKG_PATH))
-            return
-
-        self.clush_cmd = "%s;%s;%s" % \
-            (self.oper_shell['CMD_SSHPASS_PRE'] %
-             {"ssh_ip": "", "cmd": self.oper_shell['CMD_PKG_SCP']},
-             self.oper_shell['CMD_SSHPASS_PRE'] %
-             {"ssh_ip": "", "cmd": self.oper_shell['CMD_CFG_SCP']},
-             self.oper_shell['CMD_SSHPASS_PRE'] %
-             {"ssh_ip": "ssh " + self.mgt_ip, "cmd": self.oper_shell[
-                 'CMD_PKG_UNZIP']})
-
-        subprocess.check_output(
-            self.clush_cmd, shell=True, stderr=subprocess.STDOUT)
-
-    def _execute(self):
-        try:
-            if not self.task_type or not self.mgt_ip:
-                LOG.error(
-                    _("<<<task type or management ip is empty, nothing to do>>>"))
-                return
-
-            self.oper_type[self.task_type]()
-        except subprocess.CalledProcessError as e:
-            LOG.warn(
-                _("<<<execute clush command failed: %s>>>" % e.output.strip()))
-        except Exception as e:
-            LOG.exception(_(e.message))
-        else:
-            LOG.info(
-                _("<<<execute clush command successfully: %s>>>" % self.clush_cmd))
diff --git a/code/daisy/daisy/api/backends/zenic/config.py b/code/daisy/daisy/api/backends/zenic/config.py
deleted file mode 100755
index d8934e88..00000000
--- a/code/daisy/daisy/api/backends/zenic/config.py
+++ /dev/null
@@ -1,130 +0,0 @@
-# -*- coding: utf-8 -*-
-import os
-from ConfigParser import ConfigParser
-
-
-default_zenic_conf_template_path = "/var/lib/daisy/zenic/"
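
For reference, the `memmode` buckets that `get_deploy_node_cfg` (above) derives from `host_detail['memory']['total']` reduce to a small pure function. A minimal sketch, with a hypothetical name; the input is the leading KB figure of that field:

    def memmode_for_host(total_memory_kb):
        # total_memory_kb comes from host_detail['memory']['total'],
        # e.g. "33554432 kB".split()[0]; convert KB to GB first.
        memory_gb = int(total_memory_kb) / (1024 * 1024)
        if memory_gb < 8:
            return 'tiny'
        elif memory_gb < 16:
            return 'small'
        elif memory_gb < 32:
            return 'medium'
        return 'large'

For example, a host reporting 33554432 kB (32 GB) lands in 'large', matching the deleted code's else branch.
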
-zenic_conf_template_path = default_zenic_conf_template_path - - -def update_conf(zenic, key, value): - zenic.set("general", key, value) - - -def get_conf(zenic_conf_file, **kwargs): - result = {} - if not kwargs: - return result - - zenic = ConfigParser() - zenic.optionxform = str - zenic.read(zenic_conf_file) - - result = {key: zenic.get("general", kwargs.get(key, None)) - for key in kwargs.keys() - if zenic.has_option("general", kwargs.get(key, None))} - return result - - -def get_nodeid(deploy_ip, zbp_ips): - nodeid = 0 - i = 0 - for ip in zbp_ips: - if deploy_ip == ip: - break - else: - i = i + 1 - - if i == 0: - nodeid = 1 - elif i == 1: - nodeid = 256 - else: - nodeid = i - - return nodeid - - -def update_zenic_conf(config_data, cluster_conf_path): - print "zenic config data is:" - import pprint - pprint.pprint(config_data) - - daisy_zenic_path = zenic_conf_template_path - zenic_conf_template_file = os.path.join(daisy_zenic_path, "zenic.conf") - if not os.path.exists(cluster_conf_path): - os.makedirs(cluster_conf_path) - - zenic = ConfigParser() - zenic.optionxform = str - zenic.read(zenic_conf_template_file) - - zbpips = '' - for ip in config_data['zbp_ips']: - if not zbpips: - zbpips = ip - else: - zbpips = zbpips + ',' + ip - update_conf(zenic, 'zbpips', zbpips) - update_conf(zenic, 'zbp_node_num', config_data['zbp_node_num']) - nodelist = '1,256' - if len(config_data['zbp_ips']) > 2: - for i in range(2, len(config_data['zbp_ips'])): - nodelist = nodelist + ',' + 'i' - update_conf(zenic, 'zbpnodelist', nodelist) - - zampips = '' - for ip in config_data['zamp_ips']: - if not zampips: - zampips = ip - else: - zampips = zampips + ',' + ip - update_conf(zenic, 'zampips', zampips) - update_conf(zenic, 'zamp_node_num', config_data['zamp_node_num']) - - mongodbips = '' - for ip in config_data['mongodb_ips']: - if not mongodbips: - mongodbips = ip - else: - mongodbips = mongodbips + ',' + ip - update_conf(zenic, 'mongodbips', mongodbips) - update_conf(zenic, 'mongodb_node_num', config_data['mongodb_node_num']) - - update_conf(zenic, 'zamp_vip', config_data['zamp_vip']) - update_conf(zenic, 'mongodb_vip', config_data['mongodb_vip']) - - deploy_hosts = config_data['deploy_hosts'] - for deploy_host in deploy_hosts: - nodeip = deploy_host['nodeip'] - hostname = deploy_host['hostname'] - MacName = deploy_host['MacName'] - memmode = deploy_host['memmode'] - - update_conf(zenic, 'nodeip', nodeip) - update_conf(zenic, 'hostname', hostname) - update_conf(zenic, 'MacName', MacName) - update_conf(zenic, 'memmode', memmode) - - nodeid = get_nodeid(nodeip, config_data['zbp_ips']) - update_conf(zenic, 'nodeid', nodeid) - - if nodeip in config_data['zamp_ips']: - update_conf(zenic, 'needzamp', 'y') - else: - update_conf(zenic, 'needzamp', 'n') - - zenic_conf = "%s_zenic.conf" % deploy_host['mgtip'] - zenic_conf_cluster_out = os.path.join(cluster_conf_path, zenic_conf) - zenic_conf_out = os.path.join(daisy_zenic_path, zenic_conf) - zenic.write(open(zenic_conf_cluster_out, "w+")) - - with open(zenic_conf_cluster_out, 'r') as fr,\ - open(zenic_conf_out, 'w') as fw: - for line in fr.readlines(): - fw.write(line.replace(' ', '')) - return - - -def test(): - print("Hello, world!") diff --git a/code/daisy/daisy/api/backends/zenic/install.py b/code/daisy/daisy/api/backends/zenic/install.py deleted file mode 100755 index 06ef3671..00000000 --- a/code/daisy/daisy/api/backends/zenic/install.py +++ /dev/null @@ -1,469 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -/install endpoint for zenic API -""" -import subprocess -import time - -from oslo_config import cfg -from oslo_log import log as logging -import threading - -from daisy import i18n - -import daisy.api.v1 - -from daisy.common import exception -from daisy.api.backends.zenic import config -import daisy.api.backends.common as daisy_cmn -import daisy.api.backends.zenic.common as zenic_cmn - - -LOG = logging.getLogger(__name__) -_ = i18n._ -_LE = i18n._LE -_LI = i18n._LI -_LW = i18n._LW -SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS -SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS -ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE - -CONF = cfg.CONF -install_opts = [ - cfg.StrOpt('max_parallel_os_number', default=10, - help='Maximum number of hosts install os at the same time.'), -] -CONF.register_opts(install_opts) - -CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format') -CONF.import_opt('container_formats', 'daisy.common.config', - group='image_format') -CONF.import_opt('image_property_quota', 'daisy.common.config') - - -host_os_status = { - 'INIT': 'init', - 'INSTALLING': 'installing', - 'ACTIVE': 'active', - 'FAILED': 'install-failed' -} - -zenic_state = zenic_cmn.ZENIC_STATE -daisy_zenic_path = zenic_cmn.daisy_zenic_path - -install_zenic_progress = 0.0 -install_mutex = threading.Lock() - - -def update_progress_to_db(req, role_id_list, - status, progress_percentage_step=0.0): - """ - Write install progress and status to db, - we use global lock object 'install_mutex' - to make sure this function is thread safety. - :param req: http req. - :param role_id_list: Column neeb be update in role table. - :param status: install status. 
- :return: - """ - - global install_mutex - global install_zenic_progress - install_mutex.acquire(True) - install_zenic_progress += progress_percentage_step - role = {} - for role_id in role_id_list: - if 0 == cmp(status, zenic_state['INSTALLING']): - role['status'] = status - role['progress'] = install_zenic_progress - if 0 == cmp(status, zenic_state['INSTALL_FAILED']): - role['status'] = status - elif 0 == cmp(status, zenic_state['ACTIVE']): - role['status'] = status - role['progress'] = 100 - daisy_cmn.update_role(req, role_id, role) - install_mutex.release() - - -def _ping_hosts_test(ips): - ping_cmd = 'fping' - for ip in set(ips): - ping_cmd = ping_cmd + ' ' + ip - obj = subprocess.Popen( - ping_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - (stdoutput, erroutput) = obj.communicate() - _returncode = obj.returncode - if _returncode == 0 or _returncode == 1: - ping_result = stdoutput.split('\n') - unreachable_hosts = [result.split( - )[0] for result in ping_result if result and - result.split()[2] != 'alive'] - else: - msg = "ping failed beaceuse there is invlid ip in %s" % ips - raise exception.InvalidIP(msg) - return unreachable_hosts - - -def _check_ping_hosts(ping_ips, max_ping_times): - if not ping_ips: - LOG.info(_("no ip got for ping test")) - return ping_ips - ping_count = 0 - time_step = 5 - LOG.info(_("begin ping test for %s" % ','.join(ping_ips))) - while True: - if ping_count == 0: - ips = _ping_hosts_test(ping_ips) - else: - ips = _ping_hosts_test(ips) - - ping_count += 1 - if ips: - LOG.debug( - _("ping host %s for %s times" % (','.join(ips), ping_count))) - if ping_count >= max_ping_times: - LOG.info(_("ping host %s timeout for %ss" % - (','.join(ips), ping_count * time_step))) - return ips - time.sleep(time_step) - else: - LOG.info(_("ping host %s success" % ','.join(ping_ips))) - time.sleep(120) - LOG.info(_("120s after ping host %s success" % ','.join(ping_ips))) - return ips - - -def _get_host_private_networks(host_detail, cluster_private_networks_name): - host_private_networks = [hi for pn in cluster_private_networks_name - for hi in - host_detail['interfaces'] if pn in - hi['assigned_networks']] - # If port type is bond,use pci segment of member port replace pci1 & pci2 - # segments of bond port - for interface_outer in host_private_networks: - if 0 != cmp(interface_outer.get('type', None), "bond"): - continue - slave1 = interface_outer.get('slave1', None) - slave2 = interface_outer.get('slave2', None) - if not slave1 or not slave2: - continue - interface_outer.pop('pci') - for interface_inner in host_detail['interfaces']: - if 0 == cmp(interface_inner.get('name', None), slave1): - interface_outer['pci1'] = interface_inner['pci'] - elif 0 == cmp(interface_inner.get('name', None), slave2): - interface_outer['pci2'] = interface_inner['pci'] - return host_private_networks - - -def get_cluster_zenic_config(req, cluster_id): - LOG.info(_("get zenic config from database...")) - # params = dict(limit=1000000) - - zenic_config = {} - - deploy_hosts = [] - deploy_host_cfg = {} - - mgt_ip = '' - zbp_ip_list = set() - mgt_ip_list = set() - - zamp_ip_list = set() - zamp_vip = '' - - mongodb_ip_list = set() - mongodb_vip = '' - - cluster_networks = daisy_cmn.get_cluster_networks_detail(req, cluster_id) - - all_roles = zenic_cmn.get_roles_detail(req) - - roles = [role for role in all_roles if (role['cluster_id'] == - cluster_id and role[ - 'deployment_backend'] == - daisy_cmn.zenic_backend_name)] - for role in roles: - if not (role['name'] == 'ZENIC_CTL' or 
role['name'] == 'ZENIC_NFM'):
-            continue
-        if role['name'] == 'ZENIC_NFM':
-            if not zamp_vip:
-                zamp_vip = role['vip']
-            if not mongodb_vip:
-                mongodb_vip = role['mongodb_vip']
-        role_hosts = zenic_cmn.get_hosts_of_role(req, role['id'])
-
-        for role_host in role_hosts:
-            mgt_ip = ''
-            for deploy_host in deploy_hosts:
-                if role_host['host_id'] == deploy_host['hostid']:
-                    mgt_ip = deploy_host['mgtip']
-                    deploy_ip = deploy_host['nodeip']
-                    break
-            if not mgt_ip:
-                host_detail = zenic_cmn.get_host_detail(
-                    req, role_host['host_id'])
-                deploy_host_cfg = zenic_cmn.get_deploy_node_cfg(
-                    req, host_detail, cluster_networks)
-                deploy_hosts.append(deploy_host_cfg)
-                mgt_ip = deploy_host_cfg['mgtip']
-                deploy_ip = deploy_host_cfg['nodeip']
-
-            mgt_ip_list.add(mgt_ip)
-            if role['name'] == 'ZENIC_CTL':
-                zbp_ip_list.add(deploy_ip)
-            elif role['name'] == 'ZENIC_NFM':
-                zamp_ip_list.add(deploy_ip)
-                mongodb_ip_list.add(deploy_ip)
-            else:
-                LOG.warn(
-                    _("<<<unsupported zenic role %s>>>"
-                      % role['name']))
-
-    zenic_config.update({'deploy_hosts': deploy_hosts})
-    zenic_config.update({'zbp_ips': zbp_ip_list})
-    zenic_config.update({'zbp_node_num': len(zbp_ip_list)})
-    zenic_config.update({'zamp_ips': zamp_ip_list})
-    zenic_config.update({'zamp_node_num': len(zamp_ip_list)})
-    zenic_config.update({'mongodb_ips': mongodb_ip_list})
-    zenic_config.update({'mongodb_node_num': len(mongodb_ip_list)})
-    zenic_config.update({'zamp_vip': zamp_vip})
-    zenic_config.update({'mongodb_vip': mongodb_vip})
-    return (zenic_config, mgt_ip_list)
-
-
-def generate_zenic_config_file(cluster_id, zenic_config):
-    LOG.info(_("generate zenic config..."))
-    if zenic_config:
-        cluster_conf_path = daisy_zenic_path + cluster_id
-        config.update_zenic_conf(zenic_config, cluster_conf_path)
-
-
-def thread_bin(req, host, role_id_list, pkg_name, install_progress_percentage):
-    host_ip = host['mgtip']
-    password = host['rootpwd']
-
-    cmd = 'mkdir -p /var/log/daisy/daisy_install/'
-    daisy_cmn.subprocess_call(cmd)
-
-    var_log_path =\
-        "/var/log/daisy/daisy_install/%s_install_zenic.log" % host_ip
-    with open(var_log_path, "w+") as fp:
-
-        cmd = '/var/lib/daisy/zenic/trustme.sh %s %s' % (host_ip, password)
-        daisy_cmn.subprocess_call(cmd, fp)
-
-        cmd = 'clush -S -b -w %s mkdir -p /home/workspace' % (host_ip,)
-        daisy_cmn.subprocess_call(cmd, fp)
-
-        cmd = 'clush -S -b -w %s mkdir -p /etc/zenic' % (host_ip,)
-        daisy_cmn.subprocess_call(cmd, fp)
-
-        cmd = 'clush -S -b -w %s rm -rf /etc/zenic/config' % (host_ip,)
-        daisy_cmn.subprocess_call(cmd, fp)
-
-        cmd = 'clush -S -b -w %s rm -rf /home/zenic' % (host_ip,)
-        daisy_cmn.subprocess_call(cmd, fp)
-
-        cmd = 'clush -S -b -w %s rm -rf /home/workspace/unipack' % (host_ip,)
-        daisy_cmn.subprocess_call(cmd, fp)
-
-        pkg_file = daisy_zenic_path + pkg_name
-        cmd = 'clush -S -b -w %s rm -rf /home/workspace/%s' % (
-            host_ip, pkg_name)
-        daisy_cmn.subprocess_call(cmd, fp)
-
-        cfg_file = daisy_zenic_path + host_ip + "_zenic.conf"
-        try:
-            exc_result = subprocess.check_output(
-                'sshpass -p ossdbg1 scp %s root@%s:/etc/zenic/config' % (
-                    cfg_file, host_ip,),
-                shell=True, stderr=fp)
-        except subprocess.CalledProcessError as e:
-            update_progress_to_db(
-                req, role_id_list, zenic_state['INSTALL_FAILED'])
-            LOG.info(_("scp zenic config for %s failed!" % host_ip))
-            fp.write(e.output.strip())
-            exit()
-        else:
-            LOG.info(_("scp zenic config for %s successfully!"
% host_ip)) - fp.write(exc_result) - - try: - exc_result = subprocess.check_output( - 'sshpass -p ossdbg1 scp %s root@%s:/home/workspace/' % ( - pkg_file, host_ip,), - shell=True, stderr=fp) - except subprocess.CalledProcessError as e: - update_progress_to_db( - req, role_id_list, zenic_state['INSTALL_FAILED']) - LOG.info(_("scp zenic pkg for %s failed!" % host_ip)) - fp.write(e.output.strip()) - exit() - else: - LOG.info(_("scp zenic pkg for %s successfully!" % host_ip)) - fp.write(exc_result) - - cmd = 'clush -S -b -w %s unzip /home/workspace/%s \ - -d /home/workspace/unipack' % ( - host_ip, pkg_name,) - daisy_cmn.subprocess_call(cmd) - - try: - exc_result = subprocess.check_output( - 'clush -S -b -w %s /home/workspace/unipack/node_install.sh' - % (host_ip,), - shell=True, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - update_progress_to_db( - req, role_id_list, zenic_state['INSTALL_FAILED']) - LOG.info(_("install zenic for %s failed!" % host_ip)) - fp.write(e.output.strip()) - exit() - else: - LOG.info(_("install zenic for %s successfully!" % host_ip)) - fp.write(exc_result) - - try: - exc_result = subprocess.check_output( - 'clush -S -b -w %s /home/zenic/node_start.sh' % (host_ip,), - shell=True, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - update_progress_to_db( - req, role_id_list, zenic_state['INSTALL_FAILED']) - LOG.info(_("start zenic for %s failed!" % host_ip)) - fp.write(e.output.strip()) - exit() - else: - update_progress_to_db( - req, role_id_list, zenic_state['INSTALLING'], - install_progress_percentage) - LOG.info(_("start zenic for %s successfully!" % host_ip)) - fp.write(exc_result) - - -class ZENICInstallTask(Thread): - - """ - Class for install tecs bin. - """ - """ Definition for install states.""" - INSTALL_STATES = { - 'INIT': 'init', - 'INSTALLING': 'installing', - 'ACTIVE': 'active', - 'FAILED': 'install-failed' - } - - def __init__(self, req, cluster_id): - super(ZENICInstallTask, self).__init__() - self.req = req - self.cluster_id = cluster_id - self.progress = 0 - self.state = ZENICInstallTask.INSTALL_STATES['INIT'] - self.message = "" - self.zenic_config_file = '' - self.mgt_ip_list = '' - self.install_log_fp = None - self.last_line_num = 0 - self.need_install = False - self.ping_times = 36 - self.log_file = "/var/log/daisy/zenic_%s_install.log" % self.cluster_id - - def run(self): - try: - self._run() - except (exception.InstallException, - exception.NotFound, - exception.InstallTimeoutException) as e: - LOG.exception(e.message) - else: - if not self.need_install: - return - self.progress = 100 - self.state = zenic_state['ACTIVE'] - self.message = "Zenic install successfully" - LOG.info(_("install Zenic for cluster %s successfully." 
- % self.cluster_id)) - - def _run(self): - - (zenic_config, self.mgt_ip_list) = get_cluster_zenic_config( - self.req, self.cluster_id) - - if not self.mgt_ip_list: - msg = _("there is no host in cluster %s") % self.cluster_id - raise exception.ThreadBinException(msg) - - unreached_hosts = _check_ping_hosts(self.mgt_ip_list, self.ping_times) - if unreached_hosts: - self.state = zenic_state['INSTALL_FAILED'] - self.message = "hosts %s ping failed" % unreached_hosts - raise exception.NotFound(message=self.message) - - generate_zenic_config_file(self.cluster_id, zenic_config) - - # check and get ZENIC version - (zenic_version_pkg_file, zenic_version_pkg_name) =\ - zenic_cmn.check_and_get_zenic_version( - daisy_zenic_path) - if not zenic_version_pkg_file: - self.state = zenic_state['INSTALL_FAILED'] - self.message = \ - "ZENIC version file not found in %s" % daisy_zenic_path - raise exception.NotFound(message=self.message) - - (role_id_list, hosts_list) = zenic_cmn.get_roles_and_hosts_list( - self.req, self.cluster_id) - - update_progress_to_db( - self.req, role_id_list, zenic_state['INSTALLING'], 0.0) - install_progress_percentage = round(1 * 1.0 / len(hosts_list), 2) * 100 - - threads = [] - for host in hosts_list: - t = threading.Thread(target=thread_bin, args=( - self.req, host, role_id_list, - zenic_version_pkg_name, install_progress_percentage)) - t.setDaemon(True) - t.start() - threads.append(t) - LOG.info(_("install threads have started, please waiting....")) - - try: - for t in threads: - t.join() - except: - LOG.warn(_("Join install thread %s failed!" % t)) - else: - install_failed_flag = False - for role_id in role_id_list: - role = daisy_cmn.get_role_detail(self.req, role_id) - if role['progress'] == 0: - update_progress_to_db( - self.req, role_id_list, zenic_state['INSTALL_FAILED']) - install_failed_flag = True - break - if role['status'] == zenic_state['INSTALL_FAILED']: - install_failed_flag = True - break - if not install_failed_flag: - LOG.info( - _("all install threads have done, \ - set all roles status to 'active'!")) - update_progress_to_db( - self.req, role_id_list, zenic_state['ACTIVE']) diff --git a/code/daisy/daisy/api/backends/zenic/uninstall.py b/code/daisy/daisy/api/backends/zenic/uninstall.py deleted file mode 100755 index 80d6c6a8..00000000 --- a/code/daisy/daisy/api/backends/zenic/uninstall.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
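
The install task removed above, and the uninstall and upgrade modules that follow, all repeat one concurrency pattern: spawn a daemon thread per host, join them all, then decide the final role state from each role's progress and status. A condensed sketch of that pattern (helper names hypothetical, not the removed API):

    import threading

    def run_on_hosts(hosts, worker, any_failed, on_failed, on_active):
        threads = []
        for host in hosts:
            t = threading.Thread(target=worker, args=(host,))
            t.setDaemon(True)  # a hung host must not block process exit
            t.start()
            threads.append(t)
        for t in threads:
            t.join()  # wait for every per-host worker
        # the removed code treats progress == 0 or a *_FAILED status as failure
        if any_failed():
            on_failed()
        else:
            on_active()

Note that wrapping the joins in try/except, as the removed code does, only guards the join calls; exceptions inside a worker still have to be caught in the worker itself, which is why the tecs backend wraps `_thread_bin` in a logging `thread_bin`.
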
- -""" -/hosts endpoint for Daisy v1 API -""" - - -import subprocess - -from oslo_log import log as logging -import threading -from daisy import i18n -import daisy.api.backends.common as daisy_cmn -import daisy.api.backends.zenic.common as zenic_cmn - -LOG = logging.getLogger(__name__) -_ = i18n._ -_LE = i18n._LE -_LI = i18n._LI -_LW = i18n._LW - -zenic_state = zenic_cmn.ZENIC_STATE - -uninstall_zenic_progress = 100.0 -uninstall_mutex = threading.Lock() - - -def update_progress_to_db(req, role_id_list, status, - progress_percentage_step=0.0): - """ - Write uninstall progress and status to db, - we use global lock object 'uninstall_mutex' - to make sure this function is thread safety. - :param req: http req. - :param role_id_list: Column neeb be update in role table. - :param status: Uninstall status. - :return: - """ - - global uninstall_mutex - global uninstall_zenic_progress - uninstall_mutex.acquire(True) - uninstall_zenic_progress -= progress_percentage_step - role = {} - for role_id in role_id_list: - if 0 == cmp(status, zenic_state['UNINSTALLING']): - role['status'] = status - role['progress'] = uninstall_zenic_progress - if 0 == cmp(status, zenic_state['UNINSTALL_FAILED']): - role['status'] = status - elif 0 == cmp(status, zenic_state['INIT']): - role['status'] = status - role['progress'] = 0 - daisy_cmn.update_role(req, role_id, role) - uninstall_mutex.release() - - -def thread_bin(req, host, role_id_list, uninstall_progress_percentage): - host_ip = host['mgtip'] - password = host['rootpwd'] - cmd = 'mkdir -p /var/log/daisy/daisy_uninstall/' - daisy_cmn.subprocess_call(cmd) - var_log_path =\ - "/var/log/daisy/daisy_uninstall/%s_uninstall_zenic.log" % host_ip - with open(var_log_path, "w+") as fp: - cmd = '/var/lib/daisy/zenic/trustme.sh %s %s' % (host_ip, password) - daisy_cmn.subprocess_call(cmd, fp) - - try: - exc_result = subprocess.check_output( - 'clush -S -b -w %s /home/zenic/node_stop.sh' % (host_ip,), - shell=True, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - update_progress_to_db( - req, role_id_list, zenic_state['UNINSTALL_FAILED']) - fp.write(e.output.strip()) - else: - update_progress_to_db( - req, role_id_list, zenic_state['UNINSTALLING'], - uninstall_progress_percentage) - fp.write(exc_result) diff --git a/code/daisy/daisy/api/backends/zenic/upgrade.py b/code/daisy/daisy/api/backends/zenic/upgrade.py deleted file mode 100755 index c8035b0b..00000000 --- a/code/daisy/daisy/api/backends/zenic/upgrade.py +++ /dev/null @@ -1,155 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -/update endpoint for Daisy v1 API -""" - -import subprocess - -from oslo_log import log as logging -import threading -from daisy import i18n - -from daisy.common import exception -import daisy.api.backends.common as daisy_cmn -import daisy.api.backends.zenic.common as zenic_cmn - -LOG = logging.getLogger(__name__) -_ = i18n._ -_LE = i18n._LE -_LI = i18n._LI -_LW = i18n._LW - -zenic_state = zenic_cmn.ZENIC_STATE -daisy_zenic_path = zenic_cmn.daisy_zenic_path - - -update_zenic_progress = 0.0 -update_mutex = threading.Lock() - - -def update_progress_to_db(req, role_id_list, status, - progress_percentage_step=0.0): - """ - Write update progress and status to db, - we use global lock object 'update_mutex' - to make sure this function is thread safety. - :param req: http req. - :param role_id_list: Column neeb be update in role table. - :param status: Update status. - :return: - """ - - global update_mutex - global update_zenic_progress - update_mutex.acquire(True) - update_zenic_progress += progress_percentage_step - role = {} - for role_id in role_id_list: - if 0 == cmp(status, zenic_state['UPDATING']): - role['status'] = status - role['progress'] = update_zenic_progress - if 0 == cmp(status, zenic_state['UPDATE_FAILED']): - role['status'] = status - elif 0 == cmp(status, zenic_state['ACTIVE']): - role['status'] = status - role['progress'] = 100 - daisy_cmn.update_role(req, role_id, role) - update_mutex.release() - - -def thread_bin(req, host, role_id_list, update_progress_percentage): - - (zenic_version_pkg_file, zenic_version_pkg_name) = \ - zenic_cmn.check_and_get_zenic_version( - daisy_zenic_path) - if not zenic_version_pkg_file: - # selfstate = zenic_state['INSTALL_FAILED'] - selfmessage = "ZENIC version file not found in %s" % daisy_zenic_path - raise exception.NotFound(message=selfmessage) - - host_ip = host['mgtip'] - password = host['rootpwd'] - - cmd = 'mkdir -p /var/log/daisy/daisy_upgrade/' - daisy_cmn.subprocess_call(cmd) - - var_log_path = \ - "/var/log/daisy/daisy_upgrade/%s_upgrade_zenic.log" % host_ip - with open(var_log_path, "w+") as fp: - cmd = '/var/lib/daisy/zenic/trustme.sh %s %s' % (host_ip, password) - daisy_cmn.subprocess_call(cmd, fp) - cmd = 'clush -S -b -w %s /home/zenic/node_stop.sh' % (host_ip,) - daisy_cmn.subprocess_call(cmd, fp) - - cmd = 'clush -S -b -w %s rm -rf /home/workspace/%s' % ( - host_ip, zenic_version_pkg_name) - daisy_cmn.subprocess_call(cmd, fp) - - cmd = 'clush -S -b -w %s rm -rf /home/workspace/unipack' % (host_ip,) - daisy_cmn.subprocess_call(cmd, fp) - - try: - exc_result = subprocess.check_output( - 'sshpass -p ossdbg1 scp %s root@%s:/home/workspace/' % ( - zenic_version_pkg_file, host_ip,), - shell=True, stderr=fp) - except subprocess.CalledProcessError as e: - update_progress_to_db( - req, role_id_list, zenic_state['INSTALL_FAILED']) - LOG.info(_("scp zenic pkg for %s failed!" % host_ip)) - fp.write(e.output.strip()) - exit() - else: - LOG.info(_("scp zenic pkg for %s successfully!" % host_ip)) - fp.write(exc_result) - - cmd = 'clush -S -b -w %s unzip /home/workspace/%s \ - -d /home/workspace/unipack' % (host_ip, zenic_version_pkg_name,) - daisy_cmn.subprocess_call(cmd) - - try: - exc_result = subprocess.check_output( - 'clush -S -b -w %s /home/workspace/unipack/node_upgrade.sh' - % (host_ip,), - shell=True, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - update_progress_to_db( - req, role_id_list, zenic_state['UPDATE_FAILED']) - LOG.info(_("Upgrade zenic for %s failed!" 
% host_ip)) - fp.write(e.output.strip()) - else: - update_progress_to_db( - req, role_id_list, zenic_state['UPDATING'], - update_progress_percentage) - LOG.info(_("Upgrade zenic for %s successfully!" % host_ip)) - fp.write(exc_result) - - try: - exc_result = subprocess.check_output( - 'clush -S -b -w %s /home/zenic/node_start.sh' % (host_ip,), - shell=True, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - update_progress_to_db( - req, role_id_list, zenic_state['UPDATE_FAILED']) - LOG.info(_("Start zenic for %s failed!" % host_ip)) - fp.write(e.output.strip()) - else: - update_progress_to_db( - req, role_id_list, zenic_state['UPDATING'], - update_progress_percentage) - LOG.info(_("Start zenic for %s successfully!" % host_ip)) - fp.write(exc_result) diff --git a/code/daisy/daisy/api/configset/clush.py b/code/daisy/daisy/api/configset/clush.py index a2cb35bd..1cd65597 100755 --- a/code/daisy/daisy/api/configset/clush.py +++ b/code/daisy/daisy/api/configset/clush.py @@ -1,7 +1,6 @@ import subprocess import daisy.registry.client.v1.api as registry -from daisy.api.backends.tecs import config as role_service from oslo_log import log as logging import webob.exc from webob.exc import HTTPBadRequest @@ -188,7 +187,7 @@ class config_clushshell(): if service['component_id'] not in components_id: continue - services_name = role_service.service_map.get(service['name']) + services_name = daisy_cmn.service_map.get(service['name']) if not services_name: msg = "Can't find service for '%s'" % service raise HTTPBadRequest(explanation=msg) @@ -275,7 +274,7 @@ class config_clushshell(): def _role_service_restart(self, role_info, host_ip): """ """ for service in role_info['service_name']: - services_name = role_service.service_map.get(service) + services_name = daisy_cmn.service_map.get(service) if not services_name: msg = "Can't find service for '%s'" % service raise HTTPBadRequest(explanation=msg) diff --git a/code/daisy/daisy/api/v1/backup_restore.py b/code/daisy/daisy/api/v1/backup_restore.py index d8f7ca00..3c35adf7 100755 --- a/code/daisy/daisy/api/v1/backup_restore.py +++ b/code/daisy/daisy/api/v1/backup_restore.py @@ -35,7 +35,7 @@ from daisy.common import wsgi import daisy.registry.client.v1.api as registry from daisy.api.v1 import controller from daisy.api.v1 import filters -import daisy.api.backends.tecs.common as tecs_cmn +import daisy.api.backends.common as daisy_cmn LOG = logging.getLogger(__name__) @@ -169,7 +169,7 @@ class Controller(controller.BaseController): 'rm -rf {0}daisy_tmp'.format(BACK_PATH) ] - tecs_cmn.run_scrip(scripts, msg='Backup file failed!') + daisy_cmn.run_scrip(scripts, msg='Backup file failed!') return {"backup_file": BACK_PATH + backup_file_name} @utils.mutating @@ -191,7 +191,8 @@ class Controller(controller.BaseController): BACK_PATH), 'rm -rf {0}daisy_tmp'.format(BACK_PATH) ] - tecs_cmn.run_scrip(restore_scripts, msg='Restore failed!') + + daisy_cmn.run_scrip(restore_scripts, msg='Restore failed!') LOG.info('Restore successfully') @utils.mutating @@ -210,7 +211,7 @@ class Controller(controller.BaseController): file_meta['backup_file_path'], BACK_PATH) ] - tecs_cmn.run_scrip(scripts, msg='Decompression file failed!') + daisy_cmn.run_scrip(scripts, msg='Decompression file failed!') try: version = subprocess.check_output( @@ -222,7 +223,7 @@ class Controller(controller.BaseController): LOG.error(msg) raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") - tecs_cmn.run_scrip(['rm -rf {0}daisy_tmp'.format(BACK_PATH)]) + 
daisy_cmn.run_scrip(['rm -rf {0}daisy_tmp'.format(BACK_PATH)]) return {"backup_file_version": version} @utils.mutating diff --git a/code/daisy/daisy/api/v1/host_template.py b/code/daisy/daisy/api/v1/host_template.py index 5b67098c..9345008b 100755 --- a/code/daisy/daisy/api/v1/host_template.py +++ b/code/daisy/daisy/api/v1/host_template.py @@ -40,15 +40,11 @@ from daisy import notifier import daisy.registry.client.v1.api as registry from daisy.registry.api.v1 import template -import daisy.api.backends.tecs.common as tecs_cmn - try: import simplejson as json except ImportError: import json -daisy_tecs_path = tecs_cmn.daisy_tecs_path - LOG = logging.getLogger(__name__) _ = i18n._ diff --git a/code/daisy/daisy/api/v1/hosts.py b/code/daisy/daisy/api/v1/hosts.py index 9f2dd6e0..dc88c8f5 100755 --- a/code/daisy/daisy/api/v1/hosts.py +++ b/code/daisy/daisy/api/v1/hosts.py @@ -40,7 +40,6 @@ from daisy import notifier import daisy.registry.client.v1.api as registry import threading import daisy.api.backends.common as daisy_cmn -import daisy.api.backends.tecs.common as tecs_cmn import ConfigParser import socket import netaddr @@ -212,16 +211,6 @@ class Controller(controller.BaseController): LOG.error(msg) raise HTTPForbidden(msg) - def validate_mac_format(self, mac_str): - '''Validates a mac address''' - if re.match("[0-9a-f]{2}([-:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$", - mac_str.lower()): - return - else: - msg = (_("%s invalid mac format!") % mac_str) - LOG.error(msg) - raise HTTPForbidden(msg) - def get_cluster_networks_info(self, req, cluster_id=None, type=None): ''' get_cluster_networks_info by cluster id @@ -627,10 +616,7 @@ class Controller(controller.BaseController): """ self._enforce(req, 'get_host') host_meta = self.get_host_meta_or_404(req, id) - if host_meta.get("hwm_id"): - self.check_discover_state_with_hwm(req, host_meta) - else: - self.check_discover_state_with_no_hwm(req, host_meta) + self.check_discover_state_with_no_hwm(req, host_meta) host_vcpu_pin = vcpu_pin.allocate_cpus(host_meta) host_meta.update(host_vcpu_pin) if 'role' in host_meta and 'CONTROLLER_HA' in host_meta['role']: @@ -640,7 +626,7 @@ class Controller(controller.BaseController): cluster_id = cluster_info[0]['id'] ctl_ha_nodes_min_mac =\ - tecs_cmn.get_ctl_ha_nodes_min_mac(req, cluster_id) + daisy_cmn.get_ctl_ha_nodes_min_mac(req, cluster_id) sorted_ha_nodes = \ sorted(ctl_ha_nodes_min_mac.iteritems(), key=lambda d: d[1]) sorted_ha_nodes_min_mac = \ @@ -657,9 +643,9 @@ class Controller(controller.BaseController): role_id = role['id'] break service_disks = \ - tecs_cmn.get_service_disk_list(req, - {'filters': { - 'role_id': role_id}}) + daisy_cmn.get_service_disk_list(req, + {'filters': { + 'role_id': role_id}}) db_share_cluster_disk = [] service_lun_info = [] for disk in service_disks: @@ -708,35 +694,11 @@ class Controller(controller.BaseController): try: nodes = registry.get_hosts_detail(req.context, **params) for node in nodes: - if node.get("hwm_id"): - self.check_discover_state_with_hwm(req, node) - else: - self.check_discover_state_with_no_hwm(req, node) + self.check_discover_state_with_no_hwm(req, node) except exception.Invalid as e: raise HTTPBadRequest(explanation=e.msg, request=req) return dict(nodes=nodes) - def check_discover_state_with_hwm(self, req, node): - node['discover_state'] = None - host_meta = self.get_host_meta_or_404(req, node.get('id')) - if host_meta and host_meta.get('interfaces'): - mac_list = [ - interface['mac'] for interface in - host_meta.get('interfaces') if interface.get('mac')] - if 
mac_list: - min_mac = min(mac_list) - pxe_discover_host = self._get_discover_host_by_mac(req, - min_mac) - if pxe_discover_host: - if pxe_discover_host.get('ip'): - node['discover_state'] = \ - "SSH:" + pxe_discover_host.get('status') - else: - node['discover_state'] = \ - "PXE:" + pxe_discover_host.get('status') - - return node - def check_discover_state_with_no_hwm(self, req, node): node['discover_state'] = None host_meta = self.get_host_meta_or_404(req, node.get('id')) @@ -752,59 +714,6 @@ class Controller(controller.BaseController): return node - def _update_hwm_host(self, req, hwm_host, hosts, hwm_ip): - hwm_host_mac = [hwm_host_interface['mac'] for hwm_host_interface - in hwm_host.get('interfaces')] - for host in hosts: - host_update_meta = dict() - host_meta = self.get_host_meta_or_404(req, host['id']) - host_mac = [host_interface['mac'] for host_interface - in host_meta.get('interfaces')] - set_same_mac = set(hwm_host_mac) & set(host_mac) - - if set_same_mac: - host_update_meta['hwm_id'] = hwm_host['id'] - host_update_meta['hwm_ip'] = hwm_ip - node = registry.update_host_metadata(req.context, host['id'], - host_update_meta) - return node - - host_add_meta = dict() - host_add_meta['name'] = str(hwm_host['id']) - host_add_meta['description'] = 'default' - host_add_meta['os_status'] = 'init' - host_add_meta['hwm_id'] = str(hwm_host['id']) - host_add_meta['hwm_ip'] = str(hwm_ip) - host_add_meta['interfaces'] = str(hwm_host['interfaces']) - node = registry.add_host_metadata(req.context, host_add_meta) - return node - - def update_hwm_host(self, req, host_meta): - self._enforce(req, 'get_hosts') - params = self._get_query_params(req) - try: - hosts = registry.get_hosts_detail(req.context, **params) - hosts_without_hwm_id = list() - hosts_hwm_id_list = list() - for host in hosts: - if host.get('hwm_id'): - hosts_hwm_id_list.append(host['hwm_id']) - else: - hosts_without_hwm_id.append(host) - - hwm_hosts = host_meta['nodes'] - hwm_ip = host_meta['hwm_ip'] - nodes = list() - for hwm_host in eval(hwm_hosts): - if hwm_host['id'] in hosts_hwm_id_list: - continue - node = self._update_hwm_host(req, hwm_host, - hosts_without_hwm_id, hwm_ip) - nodes.append(node) - return dict(nodes=nodes) - except exception.Invalid as e: - raise HTTPBadRequest(explanation=e.msg, request=req) - def _compute_hugepage_memory(self, hugepages, memory, hugepagesize='1G'): hugepage_memory = 0 if hugepagesize == '2M': @@ -1041,7 +950,7 @@ class Controller(controller.BaseController): raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") - orig_mac_list = list() + if 'interfaces' in host_meta: for interface_param in eval(host_meta['interfaces']): if not interface_param.get('pci', None) and \ @@ -1058,12 +967,9 @@ class Controller(controller.BaseController): 'vswitch_type'] raise HTTPBadRequest(explanation=msg, request=req, content_type="text/plain") - interfaces_db = orig_host_meta.get('interfaces', None) - orig_mac_list = [interface_db['mac'] for interface_db in - interfaces_db if interface_db['mac']] - orig_pci_list = [interface_db['pci'] for interface_db in - interfaces_db if interface_db['pci']] - if interfaces_db and len(orig_pci_list): + + if orig_host_meta.get('interfaces', None): + interfaces_db = orig_host_meta['interfaces'] interfaces_param = eval(host_meta['interfaces']) interfaces_db_ether = [ interface_db for interface_db in interfaces_db if @@ -1625,17 +1531,6 @@ class Controller(controller.BaseController): host_meta = registry.update_host_metadata(req.context, id, host_meta) - if 
orig_mac_list: - orig_min_mac = min(orig_mac_list) - discover_host = self._get_discover_host_by_mac(req, - orig_min_mac) - if discover_host: - discover_host_params = { - "mac": orig_min_mac, - "status": "DISCOVERY_SUCCESSFUL" - } - self.update_pxe_host(req, discover_host['id'], - discover_host_params) except exception.Invalid as e: msg = (_("Failed to update host metadata. Got error: %s") % utils.exception_to_str(e)) @@ -2373,112 +2268,6 @@ class Controller(controller.BaseController): return {'host_meta': host_meta} - def _get_discover_host_mac(self, req): - params = dict() - hosts_mac = list() - discover_hosts =\ - registry.get_discover_hosts_detail(req.context, **params) - for host in discover_hosts: - if host.get('mac'): - hosts_mac.append(host['mac']) - return hosts_mac - - def _get_discover_host_by_mac(self, req, host_mac): - params = dict() - discover_hosts = \ - registry.get_discover_hosts_detail(req.context, **params) - LOG.info("%s" % discover_hosts) - for host in discover_hosts: - if host.get('mac') == host_mac: - return host - return - - @utils.mutating - def add_pxe_host(self, req, host_meta): - """ - Adds a new pxe host to Daisy - - :param req: The WSGI/Webob Request object - :param host_meta: Mapping of metadata about host - - :raises HTTPBadRequest if x-host-name is missing - """ - self._enforce(req, 'add_pxe_host') - LOG.warn("host_meta: %s" % host_meta) - if not host_meta.get('mac'): - msg = "MAC parameter can not be None." - raise HTTPBadRequest(explanation=msg, - request=req, - content_type="text/plain") - - self.validate_mac_format(host_meta['mac']) - pxe_hosts_mac = self._get_discover_host_mac(req) - if host_meta['mac'] in pxe_hosts_mac: - host = self._get_discover_host_by_mac(req, host_meta['mac']) - host_meta = registry.update_discover_host_metadata( - req.context, host['id'], host_meta) - return {'host_meta': host_meta} - - if not host_meta.get('status', None): - host_meta['status'] = 'None' - - try: - pxe_host_info = \ - registry.add_discover_host_metadata(req.context, host_meta) - except exception.Invalid as e: - raise HTTPBadRequest(explanation=e.msg, request=req) - return {'host_meta': pxe_host_info} - - @utils.mutating - def update_pxe_host(self, req, id, host_meta): - """ - Update a new pxe host to Daisy - """ - self._enforce(req, 'update_pxe_host') - if not host_meta.get('mac'): - msg = "MAC parameter can not be None." - raise HTTPBadRequest(explanation=msg, - request=req, - content_type="text/plain") - - self.validate_mac_format(host_meta['mac']) - orig_host_meta = registry.get_discover_host_metadata(req.context, id) - try: - if host_meta['mac'] == orig_host_meta['mac']: - host_meta = registry.update_discover_host_metadata( - req.context, id, host_meta) - - except exception.Invalid as e: - msg = (_("Failed to update discover host metadata. 
" - "Got error: %s") % utils.exception_to_str(e)) - LOG.error(msg) - raise HTTPBadRequest(explanation=msg, - request=req, - content_type="text/plain") - except exception.NotFound as e: - msg = (_("Failed to find discover host to update: %s") % - utils.exception_to_str(e)) - LOG.error(msg) - raise HTTPNotFound(explanation=msg, - request=req, - content_type="text/plain") - except exception.Forbidden as e: - msg = (_("Forbidden to update discover host: %s") % - utils.exception_to_str(e)) - LOG.error(msg) - raise HTTPForbidden(explanation=msg, - request=req, - content_type="text/plain") - except (exception.Conflict, exception.Duplicate) as e: - LOG.error(utils.exception_to_str(e)) - raise HTTPConflict(body=_('Host operation conflicts'), - request=req, - content_type='text/plain') - else: - self.notifier.info('host.update', host_meta) - - return {'host_meta': host_meta} - class HostDeserializer(wsgi.JSONRequestDeserializer): """Handles deserialization of specific controller method requests.""" @@ -2497,9 +2286,6 @@ class HostDeserializer(wsgi.JSONRequestDeserializer): def discover_host(self, request): return self._deserialize(request) - def update_hwm_host(self, request): - return self._deserialize(request) - def add_discover_host(self, request): return self._deserialize(request) diff --git a/code/daisy/daisy/api/v1/hwms.py b/code/daisy/daisy/api/v1/hwms.py deleted file mode 100755 index 57b8e153..00000000 --- a/code/daisy/daisy/api/v1/hwms.py +++ /dev/null @@ -1,347 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -/Hwm endpoint for Daisy v1 API -""" - -from oslo_config import cfg -from oslo_log import log as logging -import webob.exc -from webob.exc import HTTPBadRequest -from webob.exc import HTTPConflict -from webob.exc import HTTPForbidden -from webob.exc import HTTPNotFound -from webob import Response - -from daisy.api import policy -import daisy.api.v1 -from daisy.api.v1 import controller -from daisy.api.v1 import filters -from daisy.common import exception -from daisy.common import property_utils -from daisy.common import utils -from daisy.common import wsgi -from daisy import i18n -from daisy import notifier -import daisy.registry.client.v1.api as registry -from daisy.registry.api.v1 import hwms - -import daisy.api.backends.tecs.common as tecs_cmn - -daisy_tecs_path = tecs_cmn.daisy_tecs_path - - -LOG = logging.getLogger(__name__) -_ = i18n._ -_LE = i18n._LE -_LI = i18n._LI -_LW = i18n._LW - -SUPPORTED_PARAMS = hwms.SUPPORTED_PARAMS -SUPPORTED_FILTERS = hwms.SUPPORTED_FILTERS -ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE -CONF = cfg.CONF -CONF.import_opt('disk_formats', 'daisy.common.config', - group='image_format') -CONF.import_opt('container_formats', 'daisy.common.config', - group='image_format') -CONF.import_opt('image_property_quota', 'daisy.common.config') - - -class Controller(controller.BaseController): - """ - WSGI controller for hwms resource in Daisy v1 API - - The Templates resource API is a RESTful web Template for Template data. - The API is as follows:: - - GET /Templates -- Returns a set of brief metadata about Templates - GET /Templates/detail -- Returns a set of detailed metadata about - Templates - HEAD /Templates/ -- Return metadata about an Template with id - GET /Templates/ -- Return Template data for Template with id - POST /Templates -- Store Template data and return metadata about the - newly-stored Template - PUT /Templates/ -- Update Template metadata and/or upload Template - data for a previously-reserved Template - DELETE /Templates/ -- Delete the Template with id - """ - def __init__(self): - self.notifier = notifier.Notifier() - registry.configure_registry_client() - self.policy = policy.Enforcer() - if property_utils.is_property_protection_enabled(): - self.prop_enforcer = property_utils.PropertyRules(self.policy) - else: - self.prop_enforcer = None - - def _enforce(self, req, action, target=None): - """Authorize an action against our policies""" - if target is None: - target = {} - try: - self.policy.enforce(req.context, action, target) - except exception.Forbidden: - raise HTTPForbidden() - - def _get_filters(self, req): - """ - Return a dictionary of query param filters from the request - - :param req: the Request object coming from the wsgi layer - :retval a dict of key/value filters - """ - query_filters = {} - for param in req.params: - if param in SUPPORTED_FILTERS: - query_filters[param] = req.params.get(param) - if not filters.validate(param, query_filters[param]): - raise HTTPBadRequest(_('Bad value passed to filter ' - '%(filter)s got %(val)s') - % {'filter': param, - 'val': query_filters[param]}) - return query_filters - - def _get_query_params(self, req): - """ - Extracts necessary query params from request. 
- - :param req: the WSGI Request object - :retval dict of parameters that can be used by registry client - """ - params = {'filters': self._get_filters(req)} - - for PARAM in SUPPORTED_PARAMS: - if PARAM in req.params: - params[PARAM] = req.params.get(PARAM) - return params - - def _raise_404_if_cluster_deleted(self, req, cluster_id): - cluster = self.get_cluster_meta_or_404(req, cluster_id) - if cluster['deleted']: - msg = _("Cluster with identifier %s has been deleted.") % \ - cluster_id - raise webob.exc.HTTPNotFound(msg) - - def get_clusters_hwm_ip(self, req): - params = self._get_query_params(req) - clusters_hwm_ip = list() - clusters = registry.get_clusters_detail(req.context, **params) - for cluster in clusters: - clusters_hwm_ip.append(cluster.get('hwm_ip')) - return clusters_hwm_ip - - @utils.mutating - def add_hwm(self, req, hwm): - """ - Adds a new hwm to Daisy. - - :param req: The WSGI/Webob Request object - :param image_meta: Mapping of metadata about Template - - :raises HTTPBadRequest if x-Template-name is missing - """ - self._enforce(req, 'add_template') - hwm = registry.add_hwm_metadata(req.context, hwm) - - return {'hwm': hwm} - - @utils.mutating - def update_hwm(self, req, id, hwm): - """ - Updates an existing hwm with the registry. - - :param request: The WSGI/Webob Request object - :param id: The opaque image identifier - - :retval Returns the updated image information as a mapping - """ - self._enforce(req, 'update_hwm') - hwm_meta = registry.hwm_detail_metadata(req.context, id) - hwm_ip = hwm_meta['hwm_ip'] - clusters_hwm_ip = self.get_clusters_hwm_ip(req) - if hwm_ip in clusters_hwm_ip: - msg = (_("Hwm %s has already used in cluster, " - "it can not be update. " % hwm_ip)) - LOG.error(msg) - raise HTTPForbidden(explanation=msg, request=req, - content_type="text/plain") - try: - hwm = registry.update_hwm_metadata(req.context, id, hwm) - except exception.Invalid as e: - msg = (_("Failed to update hwm metadata. Got error: %s") % - utils.exception_to_str(e)) - LOG.warn(msg) - raise HTTPBadRequest(explanation=msg, - request=req, - content_type="text/plain") - except exception.NotFound as e: - msg = (_("Failed to find hwm to update: %s") % - utils.exception_to_str(e)) - LOG.warn(msg) - raise HTTPNotFound(explanation=msg, - request=req, - content_type="text/plain") - except exception.Forbidden as e: - msg = (_("Forbidden to update hwm: %s") % - utils.exception_to_str(e)) - LOG.warn(msg) - raise HTTPForbidden(explanation=msg, - request=req, - content_type="text/plain") - except (exception.Conflict, exception.Duplicate) as e: - LOG.warn(utils.exception_to_str(e)) - raise HTTPConflict(body=_('hwm operation conflicts'), - request=req, - content_type='text/plain') - else: - self.notifier.info('hwm.update', hwm) - - return {'hwm': hwm} - - @utils.mutating - def delete_hwm(self, req, id): - """ - delete a existing hwm template with the registry. - - :param request: The WSGI/Webob Request object - :param id: The opaque image identifier - - :retval Returns the updated image information as a mapping - """ - self._enforce(req, 'delete_hwm') - hwm_meta = registry.hwm_detail_metadata(req.context, id) - hwm_ip = hwm_meta['hwm_ip'] - clusters_hwm_ip = self.get_clusters_hwm_ip(req) - if hwm_ip in clusters_hwm_ip: - msg = (_("Hwm %s has already used in cluster, " - "it can not be deleted. 
" % hwm_ip)) - LOG.error(msg) - raise HTTPForbidden(explanation=msg, request=req, - content_type="text/plain") - try: - registry.delete_hwm_metadata(req.context, id) - except exception.NotFound as e: - msg = (_("Failed to find hwm to delete: %s") % - utils.exception_to_str(e)) - LOG.error(msg) - raise HTTPNotFound(explanation=msg, request=req, - content_type="text/plain") - except exception.Forbidden as e: - msg = (_("Forbidden to delete hwm: %s") % - utils.exception_to_str(e)) - LOG.error(msg) - raise HTTPForbidden(explanation=msg, request=req, - content_type="text/plain") - except exception.InUseByStore as e: - msg = (_( - "hwm %(id)s could not be deleted because it is in " - "use:%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)}) - LOG.error(msg) - raise HTTPConflict(explanation=msg, request=req, - content_type="text/plain") - else: - return Response(body='', status=200) - - @utils.mutating - def detail(self, req, id): - """ - delete a existing hwm with the registry. - :param request: The WSGI/Webob Request object - :param id: The opaque image identifie - :retval Returns the updated image information as a mapping - """ - self._enforce(req, 'detail') - context = req.context - try: - hwm_meta = registry.hwm_detail_metadata(context, id) - except exception.NotFound: - msg = "Hwm with identifier %s not found" % id - LOG.debug(msg) - raise webob.exc.HTTPNotFound( - msg, request=req, content_type='text/plain') - except exception.Forbidden: - msg = "Forbidden hwm access" - LOG.debug(msg) - raise webob.exc.HTTPForbidden(msg, - request=req, - content_type='text/plain') - return {'hwm': hwm_meta} - - @utils.mutating - def list(self, req): - self._enforce(req, 'list') - params = self._get_query_params(req) - try: - hwm_list = registry.hwm_list_metadata(req.context, **params) - except exception.Invalid as e: - raise HTTPBadRequest(explanation=e.msg, request=req) - return dict(hwm=hwm_list) - - -class HwmDeserializer(wsgi.JSONRequestDeserializer): - """Handles deserialization of specific controller method requests.""" - def _deserialize(self, request): - result = {} - result["hwm"] = utils.get_hwm_meta(request) - return result - - def add_hwm(self, request): - return self._deserialize(request) - - def update_hwm(self, request): - return self._deserialize(request) - - -class HwmSerializer(wsgi.JSONResponseSerializer): - """Handles serialization of specific controller method responses.""" - def __init__(self): - self.notifier = notifier.Notifier() - - def add_hwm(self, response, result): - hwm = result['hwm'] - response.status = 201 - response.headers['Content-Type'] = 'application/json' - response.body = self.to_json(dict(hwm=hwm)) - return response - - def delete_hwm(self, response, result): - hwm = result['hwm'] - response.status = 201 - response.headers['Content-Type'] = 'application/json' - response.body = self.to_json(dict(hwm=hwm)) - return response - - def get_detail(self, response, result): - hwm = result['hwm'] - response.status = 201 - response.headers['Content-Type'] = 'application/json' - response.body = self.to_json(dict(hwm=hwm)) - return response - - def update_hwm(self, response, result): - hwm = result['hwm'] - response.status = 201 - response.headers['Content-Type'] = 'application/json' - response.body = self.to_json(dict(hwm=hwm)) - return response - - -def create_resource(): - """Templates resource factory method""" - deserializer = HwmDeserializer() - serializer = HwmSerializer() - return wsgi.Resource(Controller(), deserializer, serializer) diff --git 
a/code/daisy/daisy/api/v1/router.py b/code/daisy/daisy/api/v1/router.py index da6c2450..f3bcd4e3 100755 --- a/code/daisy/daisy/api/v1/router.py +++ b/code/daisy/daisy/api/v1/router.py @@ -29,7 +29,6 @@ from daisy.api.v1 import networks from daisy.api.v1 import install from daisy.api.v1 import disk_array from daisy.api.v1 import host_template -from daisy.api.v1 import hwms from daisy.common import wsgi from daisy.api.v1 import backup_restore @@ -153,11 +152,6 @@ class API(wsgi.Router): action='get_host', conditions={'method': ['GET']}) - mapper.connect("/hwm_nodes", - controller=hosts_resource, - action='update_hwm_host', - conditions={'method': ['POST']}) - mapper.connect("/discover_host/", controller=hosts_resource, action='discover_host', @@ -198,30 +192,6 @@ class API(wsgi.Router): action='update_pxe_host', conditions={'method': ['PUT']}) - hwms_resource = hwms.create_resource() - - mapper.connect("/hwm", - controller=hwms_resource, - action='add_hwm', - conditions={'method': ['POST']}) - mapper.connect("/hwm/{id}", - controller=hwms_resource, - action='delete_hwm', - conditions={'method': ['DELETE']}) - mapper.connect("/hwm/{id}", - controller=hwms_resource, - action='update_hwm', - conditions={'method': ['PUT']}) - mapper.connect("/hwm", - controller=hwms_resource, - action='list', - conditions={'method': ['GET']}) - - mapper.connect("/hwm/{id}", - controller=hwms_resource, - action='detail', - conditions={'method': ['GET']}) - clusters_resource = clusters.create_resource() mapper.connect("/clusters", diff --git a/code/daisy/daisy/api/v1/template.py b/code/daisy/daisy/api/v1/template.py index 86e674f9..93c9d8e3 100755 --- a/code/daisy/daisy/api/v1/template.py +++ b/code/daisy/daisy/api/v1/template.py @@ -40,10 +40,10 @@ from daisy import notifier import daisy.registry.client.v1.api as registry from daisy.registry.api.v1 import template -import daisy.api.backends.tecs.common as tecs_cmn import daisy.api.backends.common as daisy_cmn -daisy_tecs_path = tecs_cmn.daisy_tecs_path +#TODO (huzhj) move it into common sub module +daisy_path = '/var/lib/daisy/' LOG = logging.getLogger(__name__) @@ -396,7 +396,7 @@ class Controller(controller.BaseController): template_detail['hosts'] = json.loads( template_detail['hosts']) - tecs_json = daisy_tecs_path + "%s.json" % template_name + tecs_json = daisy_path + "%s.json" % template_name cmd = 'rm -rf %s' % (tecs_json,) daisy_cmn.subprocess_call(cmd) with open(tecs_json, "w+") as fp: diff --git a/code/daisy/daisy/common/utils.py b/code/daisy/daisy/common/utils.py index 91eed163..9b61d2b1 100755 --- a/code/daisy/daisy/common/utils.py +++ b/code/daisy/daisy/common/utils.py @@ -348,13 +348,6 @@ def get_host_meta(response): return result -def get_hwm_meta(response): - result = {} - for key, value in response.json.items(): - result[key] = value - return result - - def get_cluster_meta(response): result = {} for key, value in response.json.items(): diff --git a/code/daisy/daisy/db/sqlalchemy/api.py b/code/daisy/daisy/db/sqlalchemy/api.py index 7a3ab19e..ace81eb0 100755 --- a/code/daisy/daisy/db/sqlalchemy/api.py +++ b/code/daisy/daisy/db/sqlalchemy/api.py @@ -6068,127 +6068,6 @@ def cinder_volume_list(context, filters=None, **param): cinder_volume_ref = _cinder_volume_get(context, role_id=role_id) return cinder_volume_ref -@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, - stop_max_attempt_number=50) -def hwm_add(context, values): - """add hwm to daisy.""" - return _hwm_update(context, values, None) - - 
-@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, - stop_max_attempt_number=50) -def hwm_update(context, hwm_id, values): - """update cluster template to daisy.""" - return _hwm_update(context, values, hwm_id) - - -def _hwm_update(context, values, hwm_id): - """update or add hwm to daisy.""" - values = values.copy() - session = get_session() - with session.begin(): - if hwm_id: - hwm_ref = _hwm_get(context, hwm_id, session=session) - else: - hwm_ref = models.Hwm() - - if hwm_id: - # Don't drop created_at if we're passing it in... - _drop_protected_attrs(models.Hwm, values) - # NOTE(iccha-sethi): updated_at must be explicitly set in case - # only ImageProperty table was modifited - values['updated_at'] = timeutils.utcnow() - - if hwm_id: - if values.get('id', None): del values['id'] - hwm_ref.update(values) - _update_values(hwm_ref, values) - try: - hwm_ref.save(session=session) - except db_exception.DBDuplicateEntry: - raise exception.Duplicate("Node ID %s already exists!" - % values['id']) - else: - hwm_ref.update(values) - _update_values(hwm_ref, values) - try: - hwm_ref.save(session=session) - except db_exception.DBDuplicateEntry: - raise exception.Duplicate("Node ID %s already exists!" - % values['id']) - - return hwm_get(context, hwm_ref.id) - - -def hwm_destroy(context, hwm_id, session=None, force_show_deleted=False): - session = session or get_session() - with session.begin(): - hwm_ref = _hwm_get(context, hwm_id, session=session) - hwm_ref.delete(session=session) - return hwm_ref - - -def _hwm_get(context, hwm_id, session=None, force_show_deleted=False): - """Get an hwm or raise if it does not exist.""" - session = session or get_session() - try: - query = session.query(models.Hwm).filter_by(id=hwm_id) - - # filter out deleted images if context disallows it - if not force_show_deleted and not context.can_see_deleted: - query = query.filter_by(deleted=False) - hwm = query.one() - return hwm - except sa_orm.exc.NoResultFound: - msg = "No hwm found with ID %s" % hwm_id - LOG.debug(msg) - raise exception.NotFound(msg) - - -def hwm_get(context, hwm_id, session=None, force_show_deleted=False): - hwm = _hwm_get(context, hwm_id, session=session, - force_show_deleted=force_show_deleted) - return hwm - - -def hwm_get_all(context, filters=None, marker=None, limit=None, sort_key=None, - sort_dir=None): - sort_key = ['created_at'] if not sort_key else sort_key - - default_sort_dir = 'desc' - - if not sort_dir: - sort_dir = [default_sort_dir] * len(sort_key) - elif len(sort_dir) == 1: - default_sort_dir = sort_dir[0] - sort_dir *= len(sort_key) - - filters = filters or {} - showing_deleted = 'changes-since' in filters or filters.get('deleted', - False) - marker_hwm = None - if marker is not None: - marker_hwm = _hwm_get(context, marker, - force_show_deleted=showing_deleted) - - for key in ['created_at', 'id']: - if key not in sort_key: - sort_key.append(key) - sort_dir.append(default_sort_dir) - - session = get_session() - query = session.query(models.Hwm).filter_by(deleted=showing_deleted) - - query = _paginate_query(query, models.Hwm, limit, sort_key, - marker=marker_hwm, - sort_dir=None, - sort_dirs=sort_dir) - hwms = [] - for hwm in query.all(): - hwm = hwm.to_dict() - hwms.append(hwm) - return hwms - @retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, stop_max_attempt_number=50) def template_add(context, values): diff --git a/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/011_add_hwm_table.py 
b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/011_add_hwm_table.py index f7e3c753..0b43d6e0 100755 --- a/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/011_add_hwm_table.py +++ b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/011_add_hwm_table.py @@ -13,6 +13,8 @@ # License for the specific language governing permissions and limitations # under the License. +# Note: hwm (hardware management) table is currently not used. This is just +# a place holder. from sqlalchemy import MetaData, Table, Column, String from daisy.db.sqlalchemy.migrate_repo.schema import (Boolean, DateTime, Text, create_tables) diff --git a/code/daisy/daisy/db/sqlalchemy/models.py b/code/daisy/daisy/db/sqlalchemy/models.py index f9faa832..762c1619 100755 --- a/code/daisy/daisy/db/sqlalchemy/models.py +++ b/code/daisy/daisy/db/sqlalchemy/models.py @@ -116,16 +116,6 @@ class DaisyBase(models.ModelBase, models.TimestampMixin): return d -class Hwm(BASE, DaisyBase): - - """Represents an hwm in the datastore.""" - __tablename__ = 'hwm' - __table_args__ = (Index('ix_hwm_deleted', 'deleted'),) - - description = Column(Text) - hwm_ip = Column(String(36), nullable=True) - - class Host(BASE, DaisyBase): """Represents an host in the datastore.""" @@ -152,8 +142,6 @@ class Host(BASE, DaisyBase): messages = Column(Text) hugepagesize = Column(String(36)) hugepages = Column(Integer(), default=0) - hwm_id = Column(String(36)) - hwm_ip = Column(String(256)) vcpu_pin_set = Column(String(255)) dvs_high_cpuset = Column(String(255)) pci_high_cpuset = Column(String(255)) @@ -205,7 +193,6 @@ class Cluster(BASE, DaisyBase): segmentation_type = Column(String(64)) auto_scale = Column(Integer(), nullable=False, default=0) use_dns = Column(Integer(), nullable=False, default=0) - hwm_ip = Column(String(256)) class ClusterHost(BASE, DaisyBase): @@ -620,7 +607,7 @@ class CinderVolume(BASE, DaisyBase): def register_models(engine): """Create database tables for all models with the given engine.""" - models = (Hwm, Host, DiscoverHost, Cluster, ClusterHost, Template, + models = (Host, DiscoverHost, Cluster, ClusterHost, Template, HostTemplate, HostInterface, Network, IpRange, HostRole, Role, ServiceRole, Service, Component, ConfigSet, Config, ConfigFile, ConfigSetItem, ConfigHistory, Task, TaskInfo, @@ -633,7 +620,7 @@ def register_models(engine): def unregister_models(engine): """Drop database tables for all models with the given engine.""" - models = (Hwm, Host, DiscoverHost, Cluster, ClusterHost, Template, + models = (Host, DiscoverHost, Cluster, ClusterHost, Template, HostTemplate, HostInterface, Network, IpRange, HostRole, Role, ServiceRole, Service, Component, ConfigSet, Config, ConfigFile, ConfigSetItem, ConfigHistory, Task, TaskInfo, diff --git a/code/daisy/daisy/registry/api/v1/__init__.py b/code/daisy/daisy/registry/api/v1/__init__.py index f0129faa..7edd58c5 100755 --- a/code/daisy/daisy/registry/api/v1/__init__.py +++ b/code/daisy/daisy/registry/api/v1/__init__.py @@ -23,7 +23,6 @@ from daisy.registry.api.v1 import configs from daisy.registry.api.v1 import networks from daisy.registry.api.v1 import disk_array from daisy.registry.api.v1 import template -from daisy.registry.api.v1 import hwms def init(mapper): @@ -52,33 +51,6 @@ def init(mapper): action="get_host_clusters", conditions={'method': ['GET']}) - hwms_resource = hwms.create_resource() - - mapper.connect("/hwm", - controller=hwms_resource, - action="add_hwm", - conditions={'method': ['POST']}) - - mapper.connect("/hwm/{id}", - controller=hwms_resource, - 
action="delete_hwm", - conditions={'method': ['DELETE']}) - - mapper.connect("/hwm/{id}", - controller=hwms_resource, - action="update_hwm", - conditions={'method': ['PUT']}) - - mapper.connect("/hwm", - controller=hwms_resource, - action="hwm_list", - conditions={'method': ['GET']}) - - mapper.connect("/hwm/{id}", - controller=hwms_resource, - action="detail", - conditions=dict(method=["GET"])) - hosts_resource = hosts.create_resource() mapper.connect("/nodes", diff --git a/code/daisy/daisy/registry/api/v1/hosts.py b/code/daisy/daisy/registry/api/v1/hosts.py index b4b2f869..ff88b21e 100755 --- a/code/daisy/daisy/registry/api/v1/hosts.py +++ b/code/daisy/daisy/registry/api/v1/hosts.py @@ -30,10 +30,6 @@ from daisy.common import wsgi import daisy.db from daisy import i18n -from daisyclient import client as daisy_client -from daisy.registry.api.v1 import hwms as registry_hwm -import ConfigParser - reload(sys) sys.setdefaultencoding('utf-8') @@ -67,19 +63,6 @@ class Controller(object): def __init__(self): self.db_api = daisy.db.get_api() - self.daisyclient = self.get_daisyclient() - - @staticmethod - def get_daisyclient(): - """Get Daisy client instance.""" - config_daisy = ConfigParser.ConfigParser() - config_daisy.read("/etc/daisy/daisy-api.conf") - daisy_port = config_daisy.get("DEFAULT", "bind_port") - args = { - 'version': 1.0, - 'endpoint': 'http://127.0.0.1:' + daisy_port - } - return daisy_client.Client(**args) def _get_hosts(self, context, filters, **params): """Get hosts, wrapping in exception if necessary.""" @@ -403,17 +386,10 @@ class Controller(object): except Exception: LOG.exception(_LE("Unable to show host %s") % id) raise - param = dict() - param['hwm_ip'] = host_data.hwm_ip - param['hwm_id'] = host_data.hwm_id - controller = registry_hwm.Controller() - hwms = controller.hwm_list(req) - hwms_ip = [hwm['hwm_ip'] for hwm in hwms] - if param['hwm_ip'] in hwms_ip: - result = self.daisyclient.node.location(**param) - location = str(result.rack) + '/' + str(result.position) - else: - location = "" + + # Currently not used + location = "" + host_interface = self.db_api.get_host_interface(req.context, id) role_name = [] diff --git a/code/daisy/daisy/registry/api/v1/hwms.py b/code/daisy/daisy/registry/api/v1/hwms.py deleted file mode 100755 index d39b04a7..00000000 --- a/code/daisy/daisy/registry/api/v1/hwms.py +++ /dev/null @@ -1,355 +0,0 @@ -# Copyright 2010-2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Reference implementation registry server WSGI controller -""" - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import strutils -from oslo_utils import timeutils -from webob import exc - -from daisy.common import exception -from daisy.common import utils -from daisy.common import wsgi -import daisy.db -from daisy import i18n - - -LOG = logging.getLogger(__name__) -_ = i18n._ -_LE = i18n._LE -_LI = i18n._LI -_LW = i18n._LW -CONF = cfg.CONF -DISPLAY_FIELDS_IN_INDEX = ['id', 'description'] -SUPPORTED_FILTERS = ['name', 'description'] -SUPPORTED_SORT_KEYS = ('name', 'description''id', 'created_at', 'updated_at') -SUPPORTED_SORT_DIRS = ('asc', 'desc') -SUPPORTED_PARAMS = ('limit', 'marker', 'sort_key', 'sort_dir', 'name', - 'description') - - -class Controller(object): - - def __init__(self): - self.db_api = daisy.db.get_api() - - def _get_query_params(self, req): - """Extract necessary query parameters from http request. - - :param req: the Request object coming from the wsgi layer - :retval dictionary of filters to apply to list of templates - """ - params = { - 'filters': self._get_filters(req), - 'limit': self._get_limit(req), - 'sort_key': [self._get_sort_key(req)], - 'sort_dir': [self._get_sort_dir(req)], - 'marker': self._get_marker(req), - } - - for key, value in params.items(): - if value is None: - del params[key] - - return params - - def _get_filters(self, req): - """Return a dictionary of query param filters from the request - - :param req: the Request object coming from the wsgi layer - :retval a dict of key/value filters - """ - filters = {} - properties = {} - - for param in req.params: - if param in SUPPORTED_FILTERS: - filters[param] = req.params.get(param) - if param.startswith('property-'): - _param = param[9:] - properties[_param] = req.params.get(param) - - if 'changes-since' in filters: - isotime = filters['changes-since'] - try: - filters['changes-since'] = timeutils.parse_isotime(isotime) - except ValueError: - raise exc.HTTPBadRequest(_("Unrecognized changes-since value")) - - if 'protected' in filters: - value = self._get_bool(filters['protected']) - if value is None: - raise exc.HTTPBadRequest(_("protected must be True, or " - "False")) - - filters['protected'] = value - - # only allow admins to filter on 'deleted' - if req.context.is_admin: - deleted_filter = self._parse_deleted_filter(req) - if deleted_filter is not None: - filters['deleted'] = deleted_filter - elif 'changes-since' not in filters: - filters['deleted'] = False - elif 'changes-since' not in filters: - filters['deleted'] = False - - if properties: - filters['properties'] = properties - - return filters - - def _get_limit(self, req): - """Parse a limit query param into something usable.""" - try: - limit = int(req.params.get('limit', CONF.limit_param_default)) - except ValueError: - raise exc.HTTPBadRequest(_("limit param must be an integer")) - - if limit < 0: - raise exc.HTTPBadRequest(_("limit param must be positive")) - - return min(CONF.api_limit_max, limit) - - def _get_marker(self, req): - """Parse a marker query param into something usable.""" - marker = req.params.get('marker', None) - - if marker and not utils.is_uuid_like(marker): - msg = _('Invalid marker format') - raise exc.HTTPBadRequest(explanation=msg) - - return marker - - def _get_sort_key(self, req): - """Parse a sort key query param from the request object.""" - sort_key = req.params.get('sort_key', 'created_at') - if sort_key is not None and sort_key not in SUPPORTED_SORT_KEYS: - _keys = 
', '.join(SUPPORTED_SORT_KEYS) - msg = _("Unsupported sort_key. Acceptable values: %s") % (_keys,) - raise exc.HTTPBadRequest(explanation=msg) - return sort_key - - def _get_sort_dir(self, req): - """Parse a sort direction query param from the request object.""" - sort_dir = req.params.get('sort_dir', 'desc') - if sort_dir is not None and sort_dir not in SUPPORTED_SORT_DIRS: - _keys = ', '.join(SUPPORTED_SORT_DIRS) - msg = _("Unsupported sort_dir. Acceptable values: %s") % (_keys,) - raise exc.HTTPBadRequest(explanation=msg) - return sort_dir - - def _get_bool(self, value): - value = value.lower() - if value == 'true' or value == '1': - return True - elif value == 'false' or value == '0': - return False - - return None - - def _parse_deleted_filter(self, req): - """Parse deleted into something usable.""" - deleted = req.params.get('deleted') - if deleted is None: - return None - return strutils.bool_from_string(deleted) - - @utils.mutating - def add_hwm(self, req, body): - """Registers a new hwm with the registry. - - :param req: wsgi Request object - :param body: Dictionary of information about the templatae - - :retval Returns the newly-created template information as a mapping, - which will include the newly-created template's internal id - in the 'id' field - """ - hwm_data = body["hwm"] - id = hwm_data.get('id') - - if id and not utils.is_uuid_like(id): - msg = _LI("Rejecting hwm creation request for invalid hwm " - "id '%(bad_id)s'") % {'bad_id': id} - LOG.info(msg) - msg = _("Invalid hwm id format") - return exc.HTTPBadRequest(explanation=msg) - - try: - hwm_data = self.db_api.hwm_add(req.context, hwm_data) - msg = (_LI("Successfully created hwm %s") % - hwm_data["id"]) - LOG.info(msg) - if 'hwm' not in hwm_data: - hwm_data = dict(hwm=hwm_data) - return hwm_data - except exception.Duplicate: - msg = _("hwm with identifier %s already exists!") % id - LOG.warn(msg) - return exc.HTTPConflict(msg) - except exception.Invalid as e: - msg = (_("Failed to add hwm metadata. " - "Got error: %s") % utils.exception_to_str(e)) - LOG.error(msg) - return exc.HTTPBadRequest(msg) - except Exception: - LOG.exception(_LE("Unable to create hwm %s"), id) - raise - - @utils.mutating - def update_hwm(self, req, id, body): - """Registers a new hwm with the registry. - - :param req: wsgi Request object - :param body: Dictionary of information about the template - - :retval Returns the newly-created template information as a mapping, - which will include the newly-created template's internal id - in the 'id' field - """ - hwm_data = body["hwm"] - if id and not utils.is_uuid_like(id): - msg = _LI("Rejecting cluster hwm creation request for invalid " - "hwm id '%(bad_id)s'") % {'bad_id': id} - LOG.info(msg) - msg = _("Invalid hwm id format") - return exc.HTTPBadRequest(explanation=msg) - - try: - hwm_data = self.db_api.hwm_update(req.context, id, hwm_data) - msg = (_LI("Successfully updated hwm %s") % - hwm_data["id"]) - LOG.info(msg) - if 'hwm' not in hwm_data: - hwm_data = dict(hwm=hwm_data) - return hwm_data - except exception.Duplicate: - msg = _("hwm with identifier %s already exists!") % id - LOG.warn(msg) - return exc.HTTPConflict(msg) - except exception.Invalid as e: - msg = (_("Failed to update hwm metadata.Got error: %s") % - utils.exception_to_str(e)) - LOG.error(msg) - return exc.HTTPBadRequest(msg) - except Exception: - LOG.exception(_LE("Unable to update hwm %s"), id) - raise - - @utils.mutating - def delete_hwm(self, req, id): - """Registers a new hwm with the registry. 
- - :param req: wsgi Request object - :param body: Dictionary of information about the template - - :retval Returns the newly-created template information as a mapping, - which will include the newly-created template's internal id - in the 'id' field - """ - if id and not utils.is_uuid_like(id): - msg = _LI("Rejecting hwm delete request for invalid hwm " - "id '%(bad_id)s'") % {'bad_id': id} - LOG.info(msg) - msg = _("Invalid hwm id format") - return exc.HTTPBadRequest(explanation=msg) - - try: - hwm_data = self.db_api.hwm_destroy(req.context, id) - msg = (_LI("Successfully deleted hwm %s") % id) - LOG.info(msg) - if 'hwm' not in hwm_data: - hwm_data = dict(hwm=hwm_data) - return hwm_data - except exception.Invalid as e: - msg = (_("Failed to delete hwm metadata. " - "Got error: %s") % utils.exception_to_str(e)) - LOG.error(msg) - return exc.HTTPBadRequest(msg) - except Exception: - LOG.exception(_LE("Unable to delete hwm %s"), id) - raise - - @utils.mutating - def hwm_list(self, req): - params = self._get_query_params(req) - try: - filters = params.pop('filters') - marker = params.get('marker') - limit = params.get('limit') - sort_key = params.get('sort_key') - sort_dir = params.get('sort_dir') - return self.db_api.hwm_get_all( - req.context, filters=filters, marker=marker, limit=limit, - sort_key=sort_key, sort_dir=sort_dir) - except exception.NotFound: - LOG.warn(_LW("Invalid marker. hwm %(id)s could not be " - "found.") % {'id': params.get('marker')}) - msg = _("Invalid marker. hwm could not be found.") - raise exc.HTTPBadRequest(explanation=msg) - except exception.Forbidden: - LOG.warn(_LW("Access denied to hwm %(id)s but returning " - "'not found'") % {'id': params.get('marker')}) - msg = _("Invalid marker. hwm could not be found.") - raise exc.HTTPBadRequest(explanation=msg) - except Exception: - LOG.exception(_LE("Unable to list hwm")) - raise - - @utils.mutating - def detail(self, req, id): - """Registers a new hwm with the registry. - - :param req: wsgi Request object - :param body: Dictionary of information about the template - - :retval Returns the newly-created template information as a mapping, - which will include the newly-created template's internal id - in the 'id' field - """ - if id and not utils.is_uuid_like(id): - msg = _LI("Rejecting hwm delete request for invalid hwm " - "id '%(bad_id)s'") % {'bad_id': id} - LOG.info(msg) - msg = _("Invalid hwm id format") - return exc.HTTPBadRequest(explanation=msg) - - try: - hwm_data = self.db_api.hwm_get(req.context, id) - msg = (_LI("Successfully get hwm information:%s") % id) - LOG.info(msg) - if 'hwm' not in hwm_data: - hwm_data = dict(hwm=hwm_data) - return hwm_data - except exception.Invalid as e: - msg = (_("Failed to get hwm metadata. 
Got error: %s") % - utils.exception_to_str(e)) - LOG.error(msg) - return exc.HTTPBadRequest(msg) - except Exception: - LOG.exception(_LE("Unable to get hwm %s"), id) - raise - - -def create_resource(): - """Hwms resource factory method.""" - deserializer = wsgi.JSONRequestDeserializer() - serializer = wsgi.JSONResponseSerializer() - return wsgi.Resource(Controller(), deserializer, serializer) diff --git a/code/daisy/daisy/registry/client/v1/api.py b/code/daisy/daisy/registry/client/v1/api.py index b95b03e6..4c15d95d 100755 --- a/code/daisy/daisy/registry/client/v1/api.py +++ b/code/daisy/daisy/registry/client/v1/api.py @@ -328,31 +328,6 @@ def add_component_metadata(context, component_meta): return c.add_component(component_meta) -def add_hwm_metadata(context, hwm): - c = get_registry_client(context) - return c.add_hwm(hwm) - - -def update_hwm_metadata(context, hwm_id, hwm): - c = get_registry_client(context) - return c.update_hwm(hwm_id, hwm) - - -def delete_hwm_metadata(context, hwm_id): - c = get_registry_client(context) - return c.delete_hwm(hwm_id) - - -def hwm_list_metadata(context, **kwargs): - c = get_registry_client(context) - return c.list_hwm(**kwargs) - - -def hwm_detail_metadata(context, hwm_id): - c = get_registry_client(context) - return c.get_hwm_detail(hwm_id) - - def add_template_metadata(context, template): c = get_registry_client(context) return c.add_template(template) diff --git a/code/daisy/daisy/registry/client/v1/client.py b/code/daisy/daisy/registry/client/v1/client.py index 00329925..be931740 100755 --- a/code/daisy/daisy/registry/client/v1/client.py +++ b/code/daisy/daisy/registry/client/v1/client.py @@ -32,7 +32,6 @@ from daisy.registry.api.v1 import config_sets from daisy.registry.api.v1 import configs from daisy.registry.api.v1 import networks from daisy.registry.api.v1 import template -from daisy.registry.api.v1 import hwms LOG = logging.getLogger(__name__) _LE = i18n._LE @@ -572,58 +571,6 @@ class RegistryClient(client.BaseClient): data = jsonutils.loads(res.read()) return data['template'] - def add_hwm(self, hwm): - """ """ - headers = { - 'Content-Type': 'application/json', - } - - if 'hwm' not in hwm: - hwm = dict(hwm=hwm) - - body = jsonutils.dumps(hwm) - - res = self.do_request("POST", "/hwm", body=body, headers=headers) - # Registry returns a JSONified dict(image=image_info) - data = jsonutils.loads(res.read()) - return data['hwm'] - - def update_hwm(self, hwm_id, hwm): - headers = { - 'Content-Type': 'application/json', - } - if 'hwm' not in hwm: - hwm = dict(hwm=hwm) - - body = jsonutils.dumps(hwm) - - res = self.do_request( - "PUT", - "/hwm/%s" % - hwm_id, - body=body, - headers=headers) - # Registry returns a JSONified dict(image=image_info) - data = jsonutils.loads(res.read()) - return data['hwm'] - - def delete_hwm(self, hwm_id): - res = self.do_request("DELETE", "/hwm/%s" % hwm_id) - data = jsonutils.loads(res.read()) - return data['hwm'] - - def list_hwm(self, **kwargs): - """ """ - params = self._extract_params(kwargs, hwms.SUPPORTED_PARAMS) - res = self.do_request("GET", "/hwm", params=params) - data = jsonutils.loads(res.read()) - return data - - def get_hwm_detail(self, hwm_id): - res = self.do_request("GET", "/hwm/%s" % hwm_id) - data = jsonutils.loads(res.read()) - return data['hwm'] - def add_host_template(self, template): """ """ headers = { diff --git a/code/daisyclient/daisyclient/v1/client.py b/code/daisyclient/daisyclient/v1/client.py index 60bd195e..319ee5de 100755 --- a/code/daisyclient/daisyclient/v1/client.py +++ 
b/code/daisyclient/daisyclient/v1/client.py @@ -32,8 +32,6 @@ from daisyclient.v1.uninstall import UninstallManager from daisyclient.v1.update import UpdateManager from daisyclient.v1.disk_array import DiskArrayManager from daisyclient.v1.template import TemplateManager -from daisyclient.v1.hwm_nodes import NodeManager -from daisyclient.v1.hwms import HwmManager from daisyclient.v1.backup_restore import BackupRestoreManager @@ -69,6 +67,4 @@ class Client(object): self.update = UpdateManager(self.http_client) self.disk_array = DiskArrayManager(self.http_client) self.template = TemplateManager(self.http_client) - self.node = NodeManager(self.http_client) - self.hwm = HwmManager(self.http_client) self.backup_restore = BackupRestoreManager(self.http_client) diff --git a/code/daisyclient/daisyclient/v1/clusters.py b/code/daisyclient/daisyclient/v1/clusters.py index a57595b7..df50d76c 100755 --- a/code/daisyclient/daisyclient/v1/clusters.py +++ b/code/daisyclient/daisyclient/v1/clusters.py @@ -28,8 +28,7 @@ UPDATE_PARAMS = ( 'dns_nameservers', 'net_l23_provider', 'base_mac', 'internal_gateway', 'internal_cidr', 'external_cidr', 'gre_id_range', 'vlan_range', 'vni_range', 'segmentation_type', 'public_vip', 'logic_networks', - 'networking_parameters', 'routers', 'auto_scale', 'use_dns', - 'hwm_ip' + 'networking_parameters', 'routers', 'auto_scale', 'use_dns' ) CREATE_PARAMS = ( @@ -37,8 +36,7 @@ CREATE_PARAMS = ( 'dns_nameservers', 'net_l23_provider', 'base_mac', 'internal_gateway', 'internal_cidr', 'external_cidr', 'gre_id_range', 'vlan_range', 'vni_range', 'segmentation_type', 'public_vip', 'logic_networks', - 'networking_parameters', 'routers', 'auto_scale', 'use_dns', - 'hwm_ip' + 'networking_parameters', 'routers', 'auto_scale', 'use_dns' ) DEFAULT_PAGE_SIZE = 20 diff --git a/code/daisyclient/daisyclient/v1/hosts.py b/code/daisyclient/daisyclient/v1/hosts.py index 32b9c216..5de3fe35 100755 --- a/code/daisyclient/daisyclient/v1/hosts.py +++ b/code/daisyclient/daisyclient/v1/hosts.py @@ -29,7 +29,7 @@ UPDATE_PARAMS = ('name', 'resource_type', 'dmi_uuid', 'role', 'cluster', 'os_status', 'interfaces', 'is_deployment', 'description', 'deleted', 'status', 'ipmi_user', 'ipmi_passwd', 'ipmi_addr', 'ip', 'status', 'user', - 'passwd', 'hwm_id', 'hwm_ip', 'cluster_id', + 'passwd', 'cluster_id', 'vcpu_pin_set', 'dvs_high_cpuset', 'pci_high_cpuset', 'os_cpus', 'dvs_cpus', 'config_set_id', 'system', 'cpu', 'memory', 'disk', 'devices', 'pci') @@ -38,7 +38,7 @@ CREATE_PARAMS = ('id', 'name', 'description', 'resource_type', 'dmi_uuid', 'role', 'cluster', 'os_version', 'os_status', 'interfaces', 'is_deployment', 'status', 'ipmi_user', 'ipmi_passwd', 'ipmi_addr', 'ip', 'status', 'user', - 'passwd', 'hwm_id', 'hwm_ip', 'cluster_id', + 'passwd', 'cluster_id', 'vcpu_pin_set', 'dvs_high_cpuset', 'pci_high_cpuset', 'os_cpus', 'dvs_cpus', 'config_set_id', 'system', 'cpu', 'memory', 'disk', 'devices', 'pci') @@ -316,23 +316,6 @@ class HostManager(base.ManagerWithFind): return Host(self, self._format_host_meta_for_user(body)) - def get_min_mac(self, hwm_id): - params = dict() - resp, body = self.client.get('/v1/nodes') - hosts = body.get('nodes') - if hosts: - for host in hosts: - if hwm_id == host.get('hwm_id'): - resp, host_body = self.client.get('/v1/nodes/%s' % - host['id']) - interfaces = host_body['host'].get('interfaces') - if interfaces: - mac_list = [interface['mac'] for interface in - interfaces if interface.get('mac')] - if mac_list: - params['mac'] = min(mac_list) - return params - def 
add_discover_host(self, **kwargs): """Add a discover host @@ -349,9 +332,6 @@ class HostManager(base.ManagerWithFind): msg = 'create() got an unexpected keyword argument \'%s\'' raise TypeError(msg % field) - hwm_id = fields.get('hwm_id') - params = self.get_min_mac(hwm_id) - fields['mac'] = params.get('mac') hdrs = self._host_meta_to_headers(fields) resp, body = self.client.post('/v1/discover/nodes', diff --git a/code/daisyclient/daisyclient/v1/hwm_nodes.py b/code/daisyclient/daisyclient/v1/hwm_nodes.py deleted file mode 100755 index c701ab6a..00000000 --- a/code/daisyclient/daisyclient/v1/hwm_nodes.py +++ /dev/null @@ -1,407 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import sys -import copy - -from oslo_utils import encodeutils -from oslo_utils import strutils -import six - -from daisyclient.common import utils -from daisyclient.openstack.common.apiclient import base -from daisyclient.common.http import HTTPClient - -reload(sys) -sys.setdefaultencoding('utf-8') - -DEFAULT_PAGE_SIZE = 200 - -SORT_DIR_VALUES = ('asc', 'desc') -SORT_KEY_VALUES = ('serialNo', 'created_at', 'updated_at', 'status') - -OS_REQ_ID_HDR = 'x-openstack-request-id' - - -class Node(base.Resource): - def __repr__(self): - return "" % self._info - - def update(self, **fields): - self.manager.update(self, **fields) - - def delete(self, **kwargs): - return self.manager.delete(self) - - def data(self, **kwargs): - return self.manager.data(self, **kwargs) - - -class NodeManager(base.ManagerWithFind): - resource_class = Node - - def get_hwm_client(self, hwm_ip): - if hwm_ip: - endpoint = "http://" + hwm_ip + ":8089" - client = HTTPClient(endpoint) - else: - client = self.client - - return client - - def _list(self, url, hwm_ip, response_key, obj_class=None, body=None): - hwm_client = self.get_hwm_client(hwm_ip) - resp, body = hwm_client.get(url) - - if obj_class is None: - obj_class = self.resource_class - - data = body[response_key] - return ([obj_class(self, res, loaded=True) for res in data if res], - resp) - - def _host_meta_from_headers(self, headers): - meta = {'properties': {}} - safe_decode = encodeutils.safe_decode - for key, value in six.iteritems(headers): - value = safe_decode(value, incoming='utf-8') - if key.startswith('x-image-meta-property-'): - _key = safe_decode(key[22:], incoming='utf-8') - meta['properties'][_key] = value - elif key.startswith('x-image-meta-'): - _key = safe_decode(key[13:], incoming='utf-8') - meta[_key] = value - - for key in ['is_public', 'protected', 'deleted']: - if key in meta: - meta[key] = strutils.bool_from_string(meta[key]) - - return self._format_host_meta_for_user(meta) - - def _host_meta_to_headers(self, fields): - headers = {} - fields_copy = copy.deepcopy(fields) - - # NOTE(flaper87): Convert to str, headers - # that are not instance of basestring. All - # headers will be encoded later, before the - # request is sent. 
- - for key, value in six.iteritems(fields_copy): - headers['%s' % key] = utils.to_str(value) - return headers - - @staticmethod - def _format_host_meta_for_user(meta): - for key in ['size', 'min_ram', 'min_disk']: - if key in meta: - try: - meta[key] = int(meta[key]) if meta[key] else 0 - except ValueError: - pass - return meta - - def _build_params(self, parameters): - params = {'limit': parameters.get('page_size', DEFAULT_PAGE_SIZE)} - - if 'marker' in parameters: - params['marker'] = parameters['marker'] - - sort_key = parameters.get('sort_key') - if sort_key is not None: - if sort_key in SORT_KEY_VALUES: - params['sort_key'] = sort_key - else: - raise ValueError('sort_key must be one of the following: %s.' - % ', '.join(SORT_KEY_VALUES)) - - sort_dir = parameters.get('sort_dir') - if sort_dir is not None: - if sort_dir in SORT_DIR_VALUES: - params['sort_dir'] = sort_dir - else: - raise ValueError('sort_dir must be one of the following: %s.' - % ', '.join(SORT_DIR_VALUES)) - - filters = parameters.get('filters', {}) - params.update(filters) - - return params - - def list(self, **kwargs): - """Get a list of nodes. - :param page_size: number of items to request in each paginated request - :param limit: maximum number of hosts to return - :param marker: begin returning hosts that appear later in the host - list than that represented by this host id - :param filters: dict of direct comparison filters that mimics the - structure of an host object - :param return_request_id: If an empty list is provided, populate this - list with the request ID value from the header - x-openstack-request-id - :rtype: list of :class:`Host` - """ - absolute_limit = kwargs.get('limit') - page_size = kwargs.get('page_size', DEFAULT_PAGE_SIZE) - - def paginate(qp, return_request_id=None): - for param, value in six.iteritems(qp): - if isinstance(value, six.string_types): - # Note(flaper87) Url encoding should - # be moved inside http utils, at least - # shouldn't be here. 
- # - # Making sure all params are str before - # trying to encode them - qp[param] = encodeutils.safe_decode(value) - - hwm_ip = kwargs.get('hwm_ip') - url = '/api/v1.0/hardware/nodes' - nodes, resp = self._list(url, hwm_ip, "nodes") - - if return_request_id is not None: - return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - - for node in nodes: - yield node - - return_request_id = kwargs.get('return_req_id', None) - - params = self._build_params(kwargs) - - seen = 0 - while True: - seen_last_page = 0 - filtered = 0 - for host in paginate(params, return_request_id): - last_host = host.serialNo - - if (absolute_limit is not None and - seen + seen_last_page >= absolute_limit): - # Note(kragniz): we've seen enough images - return - else: - seen_last_page += 1 - yield host - - seen += seen_last_page - - if seen_last_page + filtered == 0: - # Note(kragniz): we didn't get any hosts in the last page - return - - if absolute_limit is not None and seen >= absolute_limit: - # Note(kragniz): reached the limit of hosts to return - return - - if page_size and seen_last_page + filtered < page_size: - # Note(kragniz): we've reached the last page of the hosts - return - - # Note(kragniz): there are more hosts to come - params['marker'] = last_host - seen_last_page = 0 - - def location(self, **kwargs): - """Get location of node.""" - hwm_ip = kwargs.get('hwm_ip') - hwm_id = kwargs.get('hwm_id') - hwm_client = self.get_hwm_client(hwm_ip) - url = '/api/v1.0/hardware/nodes/%s/location' % hwm_id - resp, body = hwm_client.get(url) - return_request_id = kwargs.get('return_req_id', None) - if return_request_id is not None: - return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - - return Node(self, self._format_host_meta_for_user(body)) - - def restart(self, **kwargs): - """Restart node.""" - hdrs = {} - hwm_ip = kwargs.get('hwm_ip') - hwm_id = kwargs.get('hwm_id') - hwm_client = self.get_hwm_client(hwm_ip) - url = '/api/v1.0/hardware/nodes/%s/restart_actions' % hwm_id - resp, body = hwm_client.post(url, headers=hdrs, data=hdrs) - return_request_id = kwargs.get('return_req_id', None) - if return_request_id is not None: - return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - - return Node(self, self._format_host_meta_for_user(body)) - - def restart_state(self, **kwargs): - """Get restart state of node.""" - hwm_ip = kwargs.get('hwm_ip') - action_id = kwargs.get('action_id') - hwm_client = self.get_hwm_client(hwm_ip) - url = '/api/v1.0/hardware/nodes/restart_actions/%s' % action_id - resp, body = hwm_client.get(url) - return_request_id = kwargs.get('return_req_id', None) - if return_request_id is not None: - return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - - return Node(self, self._format_host_meta_for_user(body)) - - def set_boot(self, **kwargs): - """Set boot type of node.""" - hdrs = {} - hwm_ip = kwargs.get('hwm_ip') - hwm_id = kwargs.get('hwm_id') - boot_type = kwargs.get('boot_type') - hwm_client = self.get_hwm_client(hwm_ip) - url = '/api/v1.0/hardware/nodes/%s/one_time_boot?from=%s' % \ - (hwm_id, boot_type) - resp, body = hwm_client.post(url, headers=hdrs, data=hdrs) - return_request_id = kwargs.get('return_req_id', None) - if return_request_id is not None: - return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - - return Node(self, self._format_host_meta_for_user(body)) - - def update(self, **kwargs): - """Update hosts.""" - absolute_limit = kwargs.get('limit') - page_size = kwargs.get('page_size', DEFAULT_PAGE_SIZE) - - hwm_ip = 
kwargs.get('hwm_ip') - hwm_client = self.get_hwm_client(hwm_ip) - hwm_url = '/api/v1.0/hardware/nodes' - hwm_resp, hwm_body = hwm_client.get(hwm_url) - hwm_body['hwm_ip'] = hwm_ip - - def paginate(qp, return_request_id=None): - for param, value in six.iteritems(qp): - if isinstance(value, six.string_types): - # Note(flaper87) Url encoding should - # be moved inside http utils, at least - # shouldn't be here. - # - # Making sure all params are str before - # trying to encode them - qp[param] = encodeutils.safe_decode(value) - - hdrs = self._host_meta_to_headers(hwm_body) - url = '/v1/hwm_nodes' - resp, body = self.client.post(url, headers={}, data=hdrs) - obj_class = self.resource_class - hosts = [obj_class(self, res, loaded=True) for res in body['nodes'] - if res] - - if return_request_id is not None: - return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - - for host in hosts: - yield host - - return_request_id = kwargs.get('return_req_id', None) - - params = self._build_params(kwargs) - - seen = 0 - while True: - seen_last_page = 0 - filtered = 0 - for host in paginate(params, return_request_id): - last_host = host.id - - if (absolute_limit is not None and - seen + seen_last_page >= absolute_limit): - # Note(kragniz): we've seen enough images - return - else: - seen_last_page += 1 - yield host - - seen += seen_last_page - - if seen_last_page + filtered == 0: - # Note(kragniz): we didn't get any hosts in the last page - return - - if absolute_limit is not None and seen >= absolute_limit: - # Note(kragniz): reached the limit of hosts to return - return - - if page_size and seen_last_page + filtered < page_size: - # Note(kragniz): we've reached the last page of the hosts - return - - # Note(kragniz): there are more hosts to come - params['marker'] = last_host - seen_last_page = 0 - - def cloud_state(self, **kwargs): - """To inform provider the cloud state.""" - hdrs = dict() - fields = dict() - provider_ip = kwargs.pop('provider_ip') - operation = kwargs.get('operation') - fields["envName"] = kwargs.get('name') - fields["envUrl"] = kwargs.get('url') - hwm_url = '/v1/hwm' - resp, hwm_body = self.client.get(hwm_url) - hwms_ip = [hwm['hwm_ip'] for hwm in hwm_body['hwm']] - if provider_ip in hwms_ip: - url = '/api/envChangeNotification' - provider_client = self.get_hwm_client(provider_ip) - if operation == "add": - hdrs = {"add_environment": fields} - if operation == "delete": - hdrs = {"delete_environment": fields} - - resp, body = provider_client.post(url, data=hdrs) - else: - return - - def get_min_mac(self, hwm_id): - params = dict() - resp, body = self.client.get('/v1/nodes') - hosts = body.get('nodes') - if hosts: - for host in hosts: - if hwm_id == host.get('hwm_id'): - params['host_id'] = host['id'] - resp, host_body = self.client.get('/v1/nodes/%s' % - host['id']) - interfaces = host_body['host'].get('interfaces') - if interfaces: - mac_list = [interface['mac'] for interface in - interfaces if interface.get('mac')] - if mac_list: - params['mac'] = min(mac_list) - return params - - def pxe_host_discover(self, **kwargs): - """Pxe host discover.""" - hdrs = dict() - hwm_ip = kwargs.get('hwm_ip') - hwm_id = kwargs.get('hwm_id') - hwm_client = self.get_hwm_client(hwm_ip) - pxe_url = '/api/v1.0/hardware/nodes/%s/one_time_boot?from=pxe' % \ - hwm_id - resp, pxe_body = hwm_client.post(pxe_url, headers=hdrs, data=hdrs) - params = self.get_min_mac(hwm_id) - params['status'] = "DISCOVERING" - resp, body = self.client.post( - '/v1/pxe_discover/nodes', headers=params, data=params) - 
restart_url = '/api/v1.0/hardware/nodes/%s/restart_actions' % \ - hwm_id - resp, restart_body = hwm_client.post(restart_url, headers=hdrs, - data=hdrs) - return_request_id = kwargs.get('return_req_id', None) - if return_request_id is not None: - return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - - return Node(self, self._format_host_meta_for_user(restart_body)) diff --git a/code/daisyclient/daisyclient/v1/hwms.py b/code/daisyclient/daisyclient/v1/hwms.py deleted file mode 100755 index 8b597ab7..00000000 --- a/code/daisyclient/daisyclient/v1/hwms.py +++ /dev/null @@ -1,248 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy - -from oslo_utils import encodeutils -from oslo_utils import strutils -import six -import six.moves.urllib.parse as urlparse - -from daisyclient.common import utils -from daisyclient.openstack.common.apiclient import base - -UPDATE_PARAMS = ('hwm_ip', 'description') - -CREATE_PARAMS = ('id', 'hwm_ip', 'description') - -DEFAULT_PAGE_SIZE = 20 - -SORT_DIR_VALUES = ('asc', 'desc') -SORT_KEY_VALUES = ('name', 'created_at', 'updated_at') - -OS_REQ_ID_HDR = 'x-openstack-request-id' - - -class Hwm(base.Resource): - def __repr__(self): - return "" % self._info - - def update(self, **fields): - self.manager.update(self, **fields) - - def delete(self, **kwargs): - return self.manager.delete(self) - - def data(self, **kwargs): - return self.manager.data(self, **kwargs) - - -class HwmManager(base.ManagerWithFind): - resource_class = Hwm - - def _list(self, url, response_key, obj_class=None, body=None): - resp, body = self.client.get(url) - - if obj_class is None: - obj_class = self.resource_class - - data = body[response_key] - return ([obj_class(self, res, loaded=True) for res in data if res], - resp) - - def _service_meta_from_headers(self, headers): - meta = {'properties': {}} - safe_decode = encodeutils.safe_decode - for key, value in six.iteritems(headers): - value = safe_decode(value, incoming='utf-8') - if key.startswith('x-image-meta-property-'): - _key = safe_decode(key[22:], incoming='utf-8') - meta['properties'][_key] = value - elif key.startswith('x-image-meta-'): - _key = safe_decode(key[13:], incoming='utf-8') - meta[_key] = value - - for key in ['is_public', 'protected', 'deleted']: - if key in meta: - meta[key] = strutils.bool_from_string(meta[key]) - - return self._format_template_meta_for_user(meta) - - def _template_meta_to_headers(self, fields): - headers = {} - fields_copy = copy.deepcopy(fields) - - # NOTE(flaper87): Convert to str, headers - # that are not instance of basestring. All - # headers will be encoded later, before the - # request is sent. 
-
-        for key, value in six.iteritems(fields_copy):
-            headers['%s' % key] = utils.to_str(value)
-        return headers
-
-    @staticmethod
-    def _format_image_meta_for_user(meta):
-        for key in ['size', 'min_ram', 'min_disk']:
-            if key in meta:
-                try:
-                    meta[key] = int(meta[key]) if meta[key] else 0
-                except ValueError:
-                    pass
-        return meta
-
-    @staticmethod
-    def _format_template_meta_for_user(meta):
-        for key in ['size', 'min_ram', 'min_disk']:
-            if key in meta:
-                try:
-                    meta[key] = int(meta[key]) if meta[key] else 0
-                except ValueError:
-                    pass
-        return meta
-
-    def _build_params(self, parameters):
-        params = {'limit': parameters.get('page_size', DEFAULT_PAGE_SIZE)}
-
-        if 'marker' in parameters:
-            params['marker'] = parameters['marker']
-
-        sort_key = parameters.get('sort_key')
-        if sort_key is not None:
-            if sort_key in SORT_KEY_VALUES:
-                params['sort_key'] = sort_key
-            else:
-                raise ValueError('sort_key must be one of the following: %s.'
-                                 % ', '.join(SORT_KEY_VALUES))
-
-        sort_dir = parameters.get('sort_dir')
-        if sort_dir is not None:
-            if sort_dir in SORT_DIR_VALUES:
-                params['sort_dir'] = sort_dir
-            else:
-                raise ValueError('sort_dir must be one of the following: %s.'
-                                 % ', '.join(SORT_DIR_VALUES))
-
-        filters = parameters.get('filters', {})
-        params.update(filters)
-
-        return params
-
-    def get(self, hwm_id):
-        """get hwm information by id."""
-        url = "/v1/hwm/%s" % base.getid(hwm_id)
-        resp, body = self.client.get(url)
-        return Hwm(self, self._format_template_meta_for_user(body['hwm']))
-
-    def list(self, **kwargs):
-        """Get a list of hwm.
-
-        :param page_size: number of items to request in each paginated request
-        :param limit: maximum number of services to return
-        :param marker: begin returning services that appear later in the
-                       service ist than that represented by this service id
-        :param filters: dict of direct comparison filters that mimics the
-                        structure of an service object
-        :param return_request_id: If an empty list is provided, populate this
-                                  list with the request ID value from the header
-                                  x-openstack-request-id
-        :rtype: list of :class:`Service`
-        """
-        absolute_limit = kwargs.get('limit')
-
-        def paginate(qp, return_request_id=None):
-            for param, value in six.iteritems(qp):
-                if isinstance(value, six.string_types):
-                    # Note(flaper87) Url encoding should
-                    # be moved inside http utils, at least
-                    # shouldn't be here.
-                    #
-                    # Making sure all params are str before
-                    # trying to encode them
-                    qp[param] = encodeutils.safe_decode(value)
-            url = '/v1/hwm?%s' % urlparse.urlencode(qp)
-            hwms, resp = self._list(url, "hwm")
-
-            if return_request_id is not None:
-                return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None))
-
-            for hwm in hwms:
-                yield hwm
-
-        return_request_id = kwargs.get('return_req_id', None)
-
-        params = self._build_params(kwargs)
-
-        seen = 0
-        seen_last_page = 0
-
-        for hwm in paginate(params, return_request_id):
-            if (absolute_limit is not None and
-                    seen + seen_last_page >= absolute_limit):
-                # Note(kragniz): we've seen enough images
-                return
-            else:
-                seen_last_page += 1
-                yield hwm
-
-    def add(self, **kwargs):
-        """Add a hwm.
-
-        TODO(bcwaldon): document accepted params
-        """
-        fields = {}
-        for field in kwargs:
-            if field in CREATE_PARAMS:
-                fields[field] = kwargs[field]
-            elif field == 'return_req_id':
-                continue
-            else:
-                msg = 'create() got an unexpected keyword argument \'%s\''
-                raise TypeError(msg % field)
-        hdrs = self._template_meta_to_headers(fields)
-
-        resp, body = self.client.post('/v1/hwm', headers=hdrs, data=hdrs)
-        return_request_id = kwargs.get('return_req_id', None)
-        if return_request_id is not None:
-            return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None))
-
-        return Hwm(self, self._format_template_meta_for_user(body['hwm']))
-
-    def delete(self, hwm_id):
-        """Delete a hwm."""
-        url = "/v1/hwm/%s" % base.getid(hwm_id)
-        resp, body = self.client.delete(url)
-
-    def update(self, hwm_id, **kwargs):
-        """Update an hwm"""
-        hdrs = {}
-        fields = {}
-        for field in kwargs:
-            if field in UPDATE_PARAMS:
-                fields[field] = kwargs[field]
-            elif field == 'return_req_id':
-                continue
-            else:
-                msg = 'update() got an unexpected keyword argument \'%s\''
-                raise TypeError(msg % field)
-
-        hdrs.update(self._template_meta_to_headers(fields))
-        url = '/v1/hwm/%s' % base.getid(hwm_id)
-        resp, body = self.client.put(url, headers=None, data=hdrs)
-        return_request_id = kwargs.get('return_req_id', None)
-        if return_request_id is not None:
-            return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None))
-
-        return Hwm(self, self._format_template_meta_for_user(body['hwm']))
diff --git a/code/daisyclient/daisyclient/v1/shell.py b/code/daisyclient/daisyclient/v1/shell.py
index a2413092..8b328110 100755
--- a/code/daisyclient/daisyclient/v1/shell.py
+++ b/code/daisyclient/daisyclient/v1/shell.py
@@ -41,7 +41,6 @@
 import daisyclient.v1.uninstall
 import daisyclient.v1.update
 import daisyclient.v1.disk_array
 import daisyclient.v1.template
-import daisyclient.v1.hwms
 from daisyclient.v1 import param_helper
 import daisyclient.v1.backup_restore
@@ -97,10 +96,6 @@ def _daisy_show(daisy, max_column_width=80):
            help='node network interface detail, \
                 ip must be given if assigned_networks is empty,\
                 and cluster must be given if assigned_networks is not empty.')
-@utils.arg('--hwm-id', metavar='<HWM_ID>',
-           help='The id of hwm host.')
-@utils.arg('--hwm-ip', metavar='<HWM_IP>',
-           help='The ip of hwm.')
 @utils.arg('--vcpu-pin-set', metavar='<VCPU_PIN_SET>',
            help='Set the vcpu pin.')
 @utils.arg('--dvs-high-cpuset', metavar='<DVS_HIGH_CPUSET>',
@@ -249,10 +244,6 @@
            help='size of hugepage.')
 @utils.arg('--hugepages', metavar='<HUGEPAGES>',
            help='number of hugepages.')
-@utils.arg('--hwm-id', metavar='<HWM_ID>',
-           help='The id of hwm host.')
-@utils.arg('--hwm-ip', metavar='<HWM_IP>',
-           help='The ip of hwm.')
 @utils.arg('--vcpu-pin-set', metavar='<VCPU_PIN_SET>',
            help='Set the vcpu pin.')
 @utils.arg('--dvs-high-cpuset', metavar='<DVS_HIGH_CPUSET>',
@@ -345,9 +336,9 @@ def do_host_list(gc, args):
 
     hosts = gc.hosts.list(**kwargs)
 
-    columns = ['ID', 'Hwm_id', 'Name', 'Description', 'Resource_type',
+    columns = ['ID', 'Name', 'Description', 'Resource_type',
                'Status', 'Os_progress', 'Os_status', 'Discover_state',
-               'Messages', 'Hwm_ip']
+               'Messages']
     # if filters.has_key('cluster_id'):
     if 'cluster_id' in filters:
         role_columns = ['Role_progress', 'Role_status', 'Role_messages']
@@ -455,7 +446,7 @@ def do_discover_host_list(gc, args):
     filters = dict([item for item in filter_items if item[1] is not None])
     kwargs = {'filters': filters}
     discover_hosts = gc.hosts.list_discover_host(**kwargs)
-    columns = ['Id', 'Mac', 'Ip', 'User', 'Passwd', 'Status', 'Message',
+    columns = ['Id', 'Ip', 'User', 'Passwd', 'Status', 'Message',
                'Host_id', 'Cluster_id']
     utils.print_list(discover_hosts, columns)
@@ -692,7 +683,7 @@ def do_cluster_list(gc, args):
     clusters = gc.clusters.list(**kwargs)
 
     columns = ['ID', 'Name', 'Description', 'Nodes', 'Networks',
-               'Auto_scale', 'Use_dns', 'Hwm_ip', 'Status']
+               'Auto_scale', 'Use_dns', 'Status']
     utils.print_list(clusters, columns)
@@ -2308,161 +2299,6 @@ def do_delete_host_template(dc, args):
     _daisy_show(host_template)
 
 
-@utils.arg('--hwm-ip', metavar='<HWM_IP>', help='The ip of hwm')
-def do_node_list(gc, args):
-    """Get all nodes from hwm."""
-    fields = dict(filter(lambda x: x[1] is not None, vars(args).items()))
-    result = gc.node.list(**fields)
-    columns = ['id', 'cpuCore', 'cpuFrequency', 'memory', 'disk',
-               'hardwareType', 'hardwareStatus', 'interfaces']
-    utils.print_list(result, columns, conver_field=False)
-
-
-@utils.arg('--hwm-ip', metavar='<HWM_IP>', help='The ip of hwm')
-@utils.arg('hwm_id', metavar='<HWM_ID>', help='The id of hwm')
-def do_node_location(gc, args):
-    """Get node location from hwm."""
-    fields = dict(filter(lambda x: x[1] is not None, vars(args).items()))
-    location = gc.node.location(**fields)
-    _daisy_show(location)
-
-
-@utils.arg('--hwm-ip', metavar='<HWM_IP>', help='The ip of hwm')
-@utils.arg('hwm_id', metavar='<HWM_ID>', help='The id of hwm')
-def do_node_restart(gc, args):
-    """Restart node."""
-    fields = dict(filter(lambda x: x[1] is not None, vars(args).items()))
-    location = gc.node.restart(**fields)
-    _daisy_show(location)
-
-
-@utils.arg('--hwm-ip', metavar='<HWM_IP>', help='The ip of hwm')
-@utils.arg('action_id', metavar='<ACTION_ID>',
-           help='The action id of nodes')
-def do_restart_state(gc, args):
-    """Get restart state of node."""
-    fields = dict(filter(lambda x: x[1] is not None, vars(args).items()))
-    location = gc.node.restart_state(**fields)
-    _daisy_show(location)
-
-
-@utils.arg('--hwm-ip', metavar='<HWM_IP>', help='The ip of hwm')
-@utils.arg('--boot-type', metavar='<BOOT_TYPE>', help='The node boot type')
-@utils.arg('hwm_id', metavar='<HWM_ID>', help='The id of hwm')
-def do_set_boot(gc, args):
-    """Set boot type of node."""
-    fields = dict(filter(lambda x: x[1] is not None, vars(args).items()))
-    location = gc.node.set_boot(**fields)
-    _daisy_show(location)
-
-
-@utils.arg('--hwm-ip', metavar='<HWM_IP>', help='The ip of hwm')
-def do_node_update(gc, args):
-    """Update hosts."""
-    fields = dict(filter(lambda x: x[1] is not None, vars(args).items()))
-    result = gc.node.update(**fields)
-    columns = ['ID', 'Hwm_id', 'Hwm_ip', 'Name', 'Description',
-               'Resource_type', 'Status', 'Os_progress', 'Os_status',
-               'Messages']
-    utils.print_list(result, columns)
-
-
-@utils.arg('hwm_id', metavar='<HWM_ID>', help='The id of hwm')
-@utils.arg('--hwm-ip', metavar='<HWM_IP>', help='The ip of hwm')
-def do_pxe_host_discover(gc, args):
-    """Discover host with pxe."""
-    fields = dict(filter(lambda x: x[1] is not None, vars(args).items()))
-    host = gc.node.pxe_host_discover(**fields)
-    _daisy_show(host)
-
-
-@utils.arg('hwm_ip', metavar='<HWM_IP>',
-           help='Hwm ip to be added.')
-@utils.arg('--description', metavar='<DESCRIPTION>',
-           help='Hwm description to be added.')
-def do_hwm_add(gc, args):
-    """Add a hwm."""
-    fields = dict(filter(lambda x: x[1] is not None, vars(args).items()))
-
-    # Filter out values we can't use
-    CREATE_PARAMS = daisyclient.v1.hwms.CREATE_PARAMS
-    fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items()))
-
-    hwm = gc.hwm.add(**fields)
-    _daisy_show(hwm)
-
-
-@utils.arg('hwm', metavar='<HWM>', help='ID of hwm to modify.')
-@utils.arg('--hwm-ip', metavar='<HWM_IP>', help='The ip of hwm.')
-@utils.arg('--description', metavar='<DESCRIPTION>',
-           help='Description of hwm.')
-def do_hwm_update(gc, args):
-    """Update a specific hwm."""
-    fields = dict(filter(lambda x: x[1] is not None, vars(args).items()))
-    hwm_arg = fields.pop('hwm')
-    hwm = utils.find_resource(gc.hwm, hwm_arg)
-
-    # Filter out values we can't use
-    UPDATE_PARAMS = daisyclient.v1.hwms.UPDATE_PARAMS
-    fields = dict(filter(lambda x: x[0] in UPDATE_PARAMS, fields.items()))
-    hwm = gc.hwm.update(hwm, **fields)
-    _daisy_show(hwm)
-
-
-@utils.arg('--page-size', metavar='<SIZE>', default=None, type=int,
-           help='Number of hwms to request in each paginated request.')
-@utils.arg('--sort-key', default=None,
-           choices=daisyclient.v1.hwms.SORT_KEY_VALUES,
-           help='Sort hwm list by specified field.')
-@utils.arg('--sort-dir', default='asc',
-           choices=daisyclient.v1.hwms.SORT_DIR_VALUES,
-           help='Sort hwm list in specified direction.')
-def do_hwm_list(gc, args):
-    """List hwms you can access."""
-    kwargs = {'filters': {}}
-    if args.page_size is not None:
-        kwargs['page_size'] = args.page_size
-
-    kwargs['sort_key'] = args.sort_key
-    kwargs['sort_dir'] = args.sort_dir
-
-    hwms = gc.hwm.list(**kwargs)
-    columns = ['ID', 'Hwm_ip', 'Description']
-    utils.print_list(hwms, columns)
-
-
-@utils.arg('id', metavar='<ID>',
-           help='Filter hwm to those that have this id.')
-def do_hwm_detail(gc, args):
-    """List hwm you can access."""
-    host = utils.find_resource(gc.hwm, args.id)
-    _daisy_show(host)
-
-
-@utils.arg('hwms', metavar='<HWM>', nargs='+',
-           help='ID of hwm(s) to delete.')
-def do_hwm_delete(gc, args):
-    """Delete specified hwm(s)."""
-    for args_hwm in args.hwms:
-        hwm = utils.find_resource(gc.hwm, args_hwm)
-        if hwm and hwm.deleted:
-            msg = "No hwm with an ID of '%s' exists." % hwm.id
-            raise exc.CommandError(msg)
-        try:
-            if args.verbose:
-                print('Requesting hwm delete for %s ...' %
-                      encodeutils.safe_decode(args_hwm), end=' ')
-            gc.hwm.delete(hwm)
-
-            if args.verbose:
-                print('[Done]')
-
-        except exc.HTTPException as e:
-            if args.verbose:
-                print('[Fail]')
-            print('%s: Unable to delete hwm %s' % (e, args_hwm))
-
-
 @utils.arg('--provider-ip', metavar='<PROVIDER_IP>',
            help='The ip of provider.')
 @utils.arg('--operation', metavar='<OPERATION>',
diff --git a/test/tempest/tempest/api/daisy/base.py b/test/tempest/tempest/api/daisy/base.py
index 6a6b5dc2..945737c3 100644
--- a/test/tempest/tempest/api/daisy/base.py
+++ b/test/tempest/tempest/api/daisy/base.py
@@ -75,39 +75,6 @@ class BaseDaisyTest(tempest.test.BaseTestCase):
         for cluster in clusters_list:
             self.delete_cluster(cluster)
 
-    @classmethod
-    def add_hwm(self, **hwm_meta):
-        hwm_info = self.daisy_client.hwm.add(**hwm_meta)
-        return hwm_info
-
-    @classmethod
-    def delete_hwm(self, hwm_meta):
-        self.daisy_client.hwm.delete(hwm_meta)
-
-    @classmethod
-    def update_hwm(self, hwm_id, **hwm_meta):
-        hwm_info = self.daisy_client.hwm.update(hwm_id, **hwm_meta)
-        return hwm_info
-
-    @classmethod
-    def _clean_all_hwm(self):
-        hwm_list_generator = self.daisy_client.hwm.list()
-        hwm_list = [hwms for hwms in hwm_list_generator]
-        if hwm_list:
-            for hwm in hwm_list:
-                self.delete_hwm(hwm)
-
-    @classmethod
-    def list_hwm(self, **hwm_meta):
-        hwm_meta['filters'] = hwm_meta
-        hwm_list = self.daisy_client.hwm.list(**hwm_meta)
-        return hwm_list
-
-    @classmethod
-    def get_hwm_detail(self, hwm_meta):
-        hwm_detail = self.daisy_client.hwm.get(hwm_meta)
-        return hwm_detail
-
     @classmethod
     def add_host(self, **host_meta):
         host_info = self.daisy_client.hosts.add(**host_meta)
diff --git a/test/tempest/tempest/api/daisy/v1/test_cluster.py b/test/tempest/tempest/api/daisy/v1/test_cluster.py
index d1c4d727..fbbd1534 100755
--- a/test/tempest/tempest/api/daisy/v1/test_cluster.py
+++ b/test/tempest/tempest/api/daisy/v1/test_cluster.py
@@ -111,9 +111,6 @@ class TecsClusterTest(base.BaseDaisyTest):
         cls.cluster_meta8 = {'description': "test_add_host7",
                              'name': "test_add_host7",
                              'auto_scale': 1}
-        cls.cluster_meta9 = {'description': "test_with_hwm",
-                             'name': "test_with_hwm",
-                             'hwm_ip': "10.43.211.63"}
 
     def private_network_add(self):
         private_network_params = self.fake.fake_private_network_parameters()
@@ -262,17 +259,6 @@ class TecsClusterTest(base.BaseDaisyTest):
         # cluster = self.get_cluster(cluster_info.id)
         self.delete_cluster(cluster_info.id)
 
-    def test_update_cluster_with_hwm(self):
-        self.private_network_add()
-        cluster_info = self.add_cluster(**self.cluster_meta1)
-        hwm_meta = {"hwm_ip": "10.43.211.63"}
-        cluster_update_info = self.update_cluster(cluster_info.id,
-                                                  **hwm_meta)
-        self.assertEqual("10.43.211.63",
-                         cluster_update_info.hwm_ip,
-                         "Update cluster with hwm_ip failed")
-        self.delete_cluster(cluster_info.id)
-
     def test_update_cluster_with_networking_parameters_add_router(self):
         """ """
         self.private_network_add()
@@ -460,15 +446,6 @@ class TecsClusterTest(base.BaseDaisyTest):
                          "===============")
         self.delete_cluster(cluster_info.id)
 
-    def test_add_cluster_with_hwm(self):
-        self.private_network_add()
-        cluster_info = self.add_cluster(**self.cluster_meta9)
-        if cluster_info:
-            self.assertEqual(self.cluster_meta9['hwm_ip'],
-                             cluster_info.hwm_ip,
-                             "Add cluster with hwm_ip failed")
-        self.delete_cluster(cluster_info.id)
-
     def tearDown(self):
         if self.cluster_meta1.get('nodes', None):
             del self.cluster_meta1['nodes']
diff --git a/test/tempest/tempest/api/daisy/v1/test_hwm.py b/test/tempest/tempest/api/daisy/v1/test_hwm.py
deleted file mode 100755
index 78a969a2..00000000
--- a/test/tempest/tempest/api/daisy/v1/test_hwm.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.api.daisy import base
-from tempest import config
-from fake.logical_network_fake import FakeLogicNetwork as logical_fake
-CONF = config.CONF
-
-
-class DaisyHwmTest(base.BaseDaisyTest):
-    @classmethod
-    def resource_setup(cls):
-        super(DaisyHwmTest, cls).resource_setup()
-        cls.fake = logical_fake()
-
-        cls.hwm_meta = {'hwm_ip': '10.43.211.63',
-                        'description': 'the first hwm'}
-
-    def test_add_hwm(self):
-        hwm = self.add_hwm(**self.hwm_meta)
-        self.assertEqual("10.43.211.63", hwm.hwm_ip, "add-hwm failed")
-
-    def test_update_hwm(self):
-        update_hwm_meta = {'hwm_ip': '10.43.174.11'}
-        add_hwm = self.add_hwm(**self.hwm_meta)
-        update_hwm = self.update_hwm(add_hwm.id, **update_hwm_meta)
-
-        self.assertEqual("10.43.174.11", update_hwm.hwm_ip,
-                         "update-hwm failed")
-
-    def test_hwm_detail_info(self):
-        add_hwm = self.add_hwm(**self.hwm_meta)
-        hwm_detail = self.get_hwm_detail(add_hwm.id)
-        self.assertEqual("10.43.211.63", hwm_detail.hwm_ip,
-                         "test_hwm_detail_info failed")
-
-    def test_hwm_list(self):
-        self.add_hwm(**self.hwm_meta)
-        hwms = self.list_hwm()
-        for hwm in hwms:
-            self.assertTrue(hwm is not None)
-
-    def test_hwm_delete(self):
-        hwm = self.add_hwm(**self.hwm_meta)
-        self.delete_hwm(hwm.id)
-
-    def tearDown(self):
-        self._clean_all_hwm()
-        super(DaisyHwmTest, self).tearDown()
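
Editor's note on the removed pagination code: the marker/limit generator deleted from HwmManager.list is the same idiom the surviving hosts manager keeps (see the retained paginate loop at the top of this patch). For reviewers unfamiliar with it, the sketch below is a minimal, self-contained model of that idiom. It is illustrative only: FakeClient and its canned '/v1/nodes' pages are hypothetical stand-ins for the daisy HTTP client, not real daisyclient API.

import six.moves.urllib.parse as urlparse

DEFAULT_PAGE_SIZE = 20


class FakeClient(object):
    """Hypothetical stand-in for the HTTP client: serves items in pages."""

    def __init__(self, items):
        self.items = items

    def get(self, url):
        qs = urlparse.parse_qs(urlparse.urlparse(url).query)
        limit = int(qs.get('limit', [DEFAULT_PAGE_SIZE])[0])
        marker = qs.get('marker', [None])[0]
        # Resume just past the marker item, as the daisy API does.
        start = self.items.index(marker) + 1 if marker else 0
        return None, {'nodes': self.items[start:start + limit]}


def list_all(client, page_size=None, limit=None):
    """Yield every item, one HTTP request per page (marker/limit idiom)."""
    absolute_limit = limit
    params = {'limit': page_size or DEFAULT_PAGE_SIZE}
    seen = 0
    while True:
        url = '/v1/nodes?%s' % urlparse.urlencode(params)
        _resp, body = client.get(url)
        page = body['nodes']
        for item in page:
            if absolute_limit is not None and seen >= absolute_limit:
                return  # caller's absolute limit reached
            seen += 1
            yield item
        if len(page) < params['limit']:
            return  # short (or empty) page: the server has no more items
        params['marker'] = page[-1]  # next request resumes after this item


client = FakeClient(['node%02d' % i for i in range(45)])
assert len(list(list_all(client, page_size=10))) == 45
assert len(list(list_all(client, page_size=10, limit=7))) == 7

The design point, visible in both the deleted hwms.py and the kept hosts code, is that limit bounds the total number of items yielded to the caller, page_size bounds each HTTP request, and the marker query parameter (the id of the last item seen) tells the server where the next page starts.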