Delete non-core code from core code

Change-Id: I97039465a078fde8586c9231d7afa77ff95a10cb
Zhou Ya 2016-11-10 21:29:08 +08:00
parent 1fe1b5d854
commit a530e1a95a
13 changed files with 757 additions and 371 deletions

View File

@@ -198,6 +198,8 @@ update ".data_name = \"baremetal_source\""
update ".os_status = \"active\""
update ".discover_mode = \"SSH\""
echo Collected:
cat data.json
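
The hunk above extends getnodeinfo.sh so the collected data.json now records discover_mode = "SSH" alongside data_name and os_status; the discovery listener (see the api_continue hunk below) pops this field when the data is posted back. A minimal Python sketch of what the shell-side update calls appear to do — the helper itself is outside this diff, so treating it as "set one top-level key in data.json" is an assumption:

import json
import os

def update(path, key, value):
    # Assumed behavior of the script's `update` helper: rewrite the
    # JSON file in place with one top-level key set.
    data = {}
    if os.path.exists(path):
        with open(path) as f:
            data = json.load(f)
    data[key] = value
    with open(path, 'w') as f:
        json.dump(data, f, indent=2)

update('data.json', 'data_name', 'baremetal_source')
update('data.json', 'os_status', 'active')
update('data.json', 'discover_mode', 'SSH')  # new in this commit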

View File

@@ -32,6 +32,8 @@ from daisy import i18n
from daisy.common import utils
from daisy.common import exception
import daisy.registry.client.v1.api as registry
from daisy.api.backends.osinstall import osdriver
import ConfigParser
import copy
import fcntl
import json
@@ -103,6 +105,23 @@ service_map = {
'nova-cells': 'openstack-nova-cells',
'camellia-api': 'camellia-api'
}
config = ConfigParser.ConfigParser()
config.read(daisy_conf_file)
try:
OS_INSTALL_TYPE = config.get("OS", "os_install_type")
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
OS_INSTALL_TYPE = 'pxe'
_OS_HANDLE = None
def get_os_handle():
global _OS_HANDLE
if _OS_HANDLE is not None:
return _OS_HANDLE
_OS_HANDLE = osdriver.load_install_os_driver(OS_INSTALL_TYPE)
return _OS_HANDLE
def list_2_file(f, cluster_list):
@@ -273,9 +292,11 @@ def get_role_hosts(req, role_id):
def delete_role_hosts(req, role_id):
try:
registry.delete_role_host_metadata(req.context, role_id)
role_hosts = registry.get_role_host_metadata(
req.context, role_id)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return role_hosts
def set_role_status_and_progress(req, cluster_id, opera, status,
@@ -803,11 +824,9 @@ def _judge_ssh_host(req, host_id):
kwargs = {}
nodes = registry.get_hosts_detail(req.context, **kwargs)
for node in nodes:
if node.get("hwm_id"):
check_discover_state_with_hwm(req, node)
else:
check_discover_state_with_no_hwm(req, node)
os_handle = get_os_handle()
os_handle.check_discover_state(req,
node)
if node['discover_state'] and \
'SSH:DISCOVERY_SUCCESSFUL' in node['discover_state']:
if host_id == node['id']:
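
These two hunks carry the core of the change: instead of branching on a node's hwm_id (check_discover_state_with_hwm vs check_discover_state_with_no_hwm), common.py now asks a pluggable OS-install driver, selected once from the [OS] os_install_type option in daisy.conf and cached in a module-level handle. A self-contained sketch of the pattern, assuming load_install_os_driver resolves the configured type to a driver object (the real osdriver module is not shown in this commit, and the PxeDriver/SshDriver classes here are hypothetical):

try:
    import ConfigParser                      # Python 2, as in the original
except ImportError:
    import configparser as ConfigParser      # Python 3 name

class PxeDriver(object):
    def check_discover_state(self, req, node):
        node['discover_state'] = 'PXE:DISCOVERY_SUCCESSFUL'

class SshDriver(object):
    def check_discover_state(self, req, node):
        node['discover_state'] = 'SSH:DISCOVERY_SUCCESSFUL'

def load_install_os_driver(os_install_type):
    # Assumption: the real loader maps the type name to a driver similarly.
    return {'pxe': PxeDriver, 'ssh': SshDriver}[os_install_type]()

config = ConfigParser.ConfigParser()
config.read('/etc/daisy/daisy.conf')         # daisy_conf_file in the original
try:
    OS_INSTALL_TYPE = config.get("OS", "os_install_type")
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
    OS_INSTALL_TYPE = 'pxe'                  # default when the option is absent

_OS_HANDLE = None

def get_os_handle():
    # Load the driver on first use, then reuse the cached handle.
    global _OS_HANDLE
    if _OS_HANDLE is None:
        _OS_HANDLE = load_install_os_driver(OS_INSTALL_TYPE)
    return _OS_HANDLE

node = {'id': 'host-1'}
get_os_handle().check_discover_state(None, node)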

View File

@@ -17,10 +17,11 @@
/install endpoint for kolla API
"""
from oslo_log import log as logging
import subprocess
from daisy import i18n
from daisy.api.backends import driver
import daisy.api.backends.kolla.install as instl
import daisy.api.backends.common as daisy_cmn
LOG = logging.getLogger(__name__)
@@ -49,5 +50,162 @@ class API(driver.DeploymentDriver):
kolla_install_task = instl.KOLLAInstallTask(req, cluster_id)
kolla_install_task.start()
def update_progress_to_db(self, req, update_info, discover_host_meta):
discover = {}
discover['status'] = update_info['status']
discover['message'] = update_info['message']
if update_info.get('host_id'):
discover['host_id'] = update_info['host_id']
LOG.info("discover:%s", discover)
registry.update_discover_host_metadata(req.context,
discover_host_meta['id'],
discover)
def _check_uninstall_hosts(self, req, cluster_id, uninstall_hosts):
pass
def prepare_ssh_discovered_node(self, req, fp, discover_host_meta):
try:
trustme_result = subprocess.check_output(
'/var/lib/daisy/kolla/trustme.sh %s %s' %
(discover_host_meta['ip'], discover_host_meta['passwd']),
shell=True, stderr=subprocess.STDOUT)
if 'Permission denied' in trustme_result:
# when passwd was wrong
update_info = {}
update_info['status'] = 'DISCOVERY_FAILED'
update_info['message'] = "Passwd was wrong, do" \
"trustme.sh %s failed!"\
% discover_host_meta['ip']
self.update_progress_to_db(req, update_info,
discover_host_meta)
msg = (_("Do trustme.sh %s failed!" %
discover_host_meta['ip']))
LOG.warn(_(msg))
fp.write(msg)
elif 'is unreachable' in trustme_result:
# when host ip was unreachable
update_info = {}
update_info['status'] = 'DISCOVERY_FAILED'
update_info['message'] = "Host ip was unreachable," \
" do trustme.sh %s failed!" %\
discover_host_meta['ip']
self.update_progress_to_db(req, update_info,
discover_host_meta)
msg = (_("Do trustme.sh %s failed!" %
discover_host_meta['ip']))
LOG.warn(_(msg))
except subprocess.CalledProcessError as e:
update_info = {}
update_info['status'] = 'DISCOVERY_FAILED'
msg = "discover host for %s failed! raise CalledProcessError" \
" when execute trustme.sh." % discover_host_meta['ip']
update_info['message'] = msg
self.update_progress_to_db(
req, update_info, discover_host_meta)
LOG.error(_(msg))
fp.write(e.output.strip())
return
except Exception:
update_info = {}
update_info['status'] = 'DISCOVERY_FAILED'
update_info['message'] = "discover host for %s failed!" \
% discover_host_meta['ip']
self.update_progress_to_db(
req, update_info, discover_host_meta)
LOG.error(_("discover host for %s failed!"
% discover_host_meta['ip']))
fp.write("discover host for %s failed!"
% discover_host_meta['ip'])
return
try:
cmd = 'clush -S -b -w %s "rm -rf /home/daisy/discover_host"'\
% (discover_host_meta['ip'],)
daisy_cmn.subprocess_call(cmd, fp)
cmd = 'clush -S -w %s "mkdir -p /home/daisy/discover_host"'\
% (discover_host_meta['ip'],)
daisy_cmn.subprocess_call(cmd, fp)
cmd = 'clush -S -w %s "chmod 777 /home/daisy/discover_host"'\
% (discover_host_meta['ip'],)
daisy_cmn.subprocess_call(cmd, fp)
except subprocess.CalledProcessError as e:
update_info = {}
update_info['status'] = 'DISCOVERY_FAILED'
msg = "raise CalledProcessError when execute cmd for host %s."\
% discover_host_meta['ip']
update_info['message'] = msg
self.update_progress_to_db(
req, update_info, discover_host_meta)
LOG.error(_(msg))
fp.write(e.output.strip())
return
try:
subprocess.check_output(
'clush -S -w %s -c /var/lib/daisy/kolla/getnodeinfo.sh '
'--dest=/home/daisy/discover_host' %
(discover_host_meta['ip'],),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
update_info = {}
update_info['status'] = 'DISCOVERY_FAILED'
update_info['message'] = "scp getnodeinfo.sh" \
" failed!" % discover_host_meta['ip']
self.update_progress_to_db(req, update_info,
discover_host_meta)
LOG.error(_("scp getnodeinfo.sh for %s failed!"
% discover_host_meta['ip']))
fp.write(e.output.strip())
return
try:
subprocess.check_output(
'clush -S -w %s yum install -y epel-release'
% (discover_host_meta['ip'],),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
update_info = {}
update_info['status'] = 'DISCOVERY_FAILED'
update_info['message'] = \
"creat repo epel for %s failed!"\
% discover_host_meta['ip']
self.update_progress_to_db(req, update_info,
discover_host_meta)
LOG.error(_("creat repo epel for %s failed!"
% discover_host_meta['ip']))
fp.write(e.output.strip())
return
try:
subprocess.check_output(
'clush -S -w %s yum install -y jq'
% (discover_host_meta['ip'],),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
update_info = {}
update_info['status'] = 'DISCOVERY_FAILED'
update_info['message'] = \
"install jq rpm for %s failed!"\
% discover_host_meta['ip']
self.update_progress_to_db(req, update_info,
discover_host_meta)
LOG.error(_("install jq rpm for %s failed!"
% discover_host_meta['ip']))
fp.write(e.output.strip())
return
def getnodeinfo_ip(self, daisy_management_ip):
cmd = 'dhcp_linenumber=`grep -n "dhcp_ip="' \
' /var/lib/daisy/kolla/getnodeinfo.sh|cut -d ":" -f 1` && ' \
'sed -i "${dhcp_linenumber}c dhcp_ip=\'%s\'" ' \
'/var/lib/daisy/kolla/getnodeinfo.sh' \
% (daisy_management_ip,)
daisy_cmn.subprocess_call(cmd)
def getnodeinfo_listen_port(self, listen_port):
cmd = 'port_linenumber=`grep -n "listen_port="' \
' /var/lib/daisy/kolla/getnodeinfo.sh|cut -d ":" -f 1` && ' \
'sed -i "${port_linenumber}c listen_port=\'%s\'" ' \
'/var/lib/daisy/kolla/getnodeinfo.sh' % (listen_port,)
daisy_cmn.subprocess_call(cmd)
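
Taken together, prepare_ssh_discovered_node runs a fixed pipeline against each SSH-discovered node: establish trust via trustme.sh (classifying wrong-password vs host-unreachable from the combined stdout/stderr), reset /home/daisy/discover_host, push getnodeinfo.sh with clush -c, then install epel-release and jq so the node can assemble data.json. Note that update_progress_to_db calls registry.update_discover_host_metadata, yet no registry import is visible in the import hunk above. Every step repeats the same try/except shape; a hedged sketch of how that repetition could be factored (run_step and prepare are my names, not the commit's):

import subprocess

def run_step(cmd, fail_msg, update_progress, fp):
    # Run one remote preparation command; on failure, record
    # DISCOVERY_FAILED with a step-specific message and stop.
    try:
        subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
        return True
    except subprocess.CalledProcessError as e:
        update_progress({'status': 'DISCOVERY_FAILED', 'message': fail_msg})
        fp.write(e.output.strip())
        return False

def prepare(ip, update_progress, fp):
    steps = [
        ('clush -S -b -w %s "rm -rf /home/daisy/discover_host"' % ip,
         'clean discover_host dir for %s failed!' % ip),
        ('clush -S -w %s "mkdir -p /home/daisy/discover_host"' % ip,
         'create discover_host dir for %s failed!' % ip),
        ('clush -S -w %s -c /var/lib/daisy/kolla/getnodeinfo.sh '
         '--dest=/home/daisy/discover_host' % ip,
         'scp getnodeinfo.sh for %s failed!' % ip),
        ('clush -S -w %s yum install -y epel-release' % ip,
         'create epel repo for %s failed!' % ip),
        ('clush -S -w %s yum install -y jq' % ip,
         'install jq rpm for %s failed!' % ip),
    ]
    for cmd, fail_msg in steps:
        if not run_step(cmd, fail_msg, update_progress, fp):
            return False
    return True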

View File

@@ -37,7 +37,8 @@ from daisy import i18n
from daisy import notifier
import daisy.registry.client.v1.api as registry
from daisy.registry.api.v1 import template
from daisy.api.backends.osinstall import osdriver
import ConfigParser
import daisy.api.backends.common as daisy_cmn
LOG = logging.getLogger(__name__)
@@ -53,6 +54,23 @@ CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format')
CONF.import_opt('container_formats', 'daisy.common.config',
group='image_format')
CONF.import_opt('image_property_quota', 'daisy.common.config')
config = ConfigParser.ConfigParser()
config.read(daisy_cmn.daisy_conf_file)
try:
OS_INSTALL_TYPE = config.get("OS", "os_install_type")
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
OS_INSTALL_TYPE = 'pxe'
_OS_HANDLE = None
def get_os_handle():
global _OS_HANDLE
if _OS_HANDLE is not None:
return _OS_HANDLE
_OS_HANDLE = osdriver.load_install_os_driver(OS_INSTALL_TYPE)
return _OS_HANDLE
class Controller(controller.BaseController):
@@ -262,10 +280,9 @@ class Controller(controller.BaseController):
kwargs = {}
nodes = registry.get_hosts_detail(req.context, **kwargs)
for node in nodes:
if node.get("hwm_id"):
daisy_cmn.check_discover_state_with_hwm(req, node)
else:
daisy_cmn.check_discover_state_with_no_hwm(req, node)
os_handle = get_os_handle()
os_handle.check_discover_state(req,
node)
ssh_hosts_list = []
for node in nodes:
if node['discover_state'] and 'SSH' in node['discover_state']:

code/daisy/daisy/api/v1/hosts.py (825 changes) Executable file → Normal file

File diff suppressed because it is too large.

View File

@@ -364,6 +364,9 @@ class Controller(controller.BaseController):
req, cluster_id, BACKENDS_UNINSTALL_ORDER)
for backend in backends:
backend_driver = driver.load_deployment_dirver(backend)
backend_driver._check_uninstall_hosts(req,
cluster_id,
uninstall_hosts)
uninstall_thread = Thread(
target=backend_driver.uninstall, args=(
req, cluster_id, uninstall_hosts))
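
Before spawning each backend's uninstall work on a thread, the API now calls the backend's _check_uninstall_hosts hook; the kolla driver above implements it as a no-op. A minimal sketch of the calling convention, assuming driver.DeploymentDriver is the common base class (its definition is outside this diff):

from threading import Thread

class DeploymentDriver(object):
    def _check_uninstall_hosts(self, req, cluster_id, uninstall_hosts):
        raise NotImplementedError

    def uninstall(self, req, cluster_id, uninstall_hosts):
        raise NotImplementedError

class KollaDriver(DeploymentDriver):
    def _check_uninstall_hosts(self, req, cluster_id, uninstall_hosts):
        pass  # kolla accepts any host set, as in the hunk above

    def uninstall(self, req, cluster_id, uninstall_hosts):
        print('uninstalling %s from %s' % (uninstall_hosts, cluster_id))

backend_driver = KollaDriver()
backend_driver._check_uninstall_hosts(None, 'cluster-1', ['host-1'])
Thread(target=backend_driver.uninstall,
       args=(None, 'cluster-1', ['host-1'])).start()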

View File

@@ -350,13 +350,6 @@ def get_host_meta(response):
return result
def get_hwm_meta(response):
result = {}
for key, value in response.json.items():
result[key] = value
return result
def get_cluster_meta(response):
result = {}
for key, value in response.json.items():
@@ -1211,8 +1204,3 @@ def get_numa_node_from_cpus(numa, str_cpus):
numa_nodes.sort()
return numa_nodes
def get_provider_client(hwm_ip):
endpoint = "http://" + hwm_ip + ":8089"
args = {'version': 1.0, 'endpoint': endpoint}
return provider_client.Client(**args)

View File

@@ -29,19 +29,26 @@ UPDATE_PARAMS = ('name', 'resource_type', 'dmi_uuid', 'role', 'cluster',
'os_status', 'interfaces', 'is_deployment',
'description', 'deleted', 'status', 'ipmi_user',
'ipmi_passwd', 'ipmi_addr', 'ip', 'status', 'user',
'passwd', 'cluster_id',
'passwd', 'hwm_id', 'hwm_ip', 'cluster_id',
'vcpu_pin_set', 'dvs_high_cpuset', 'pci_high_cpuset',
'os_cpus', 'dvs_cpus', 'config_set_id', 'system',
'cpu', 'memory', 'disks', 'devices', 'pci')
'os_cpus', 'dvs_cpus', 'config_set_id', 'discover_state',
'group_list', 'version_patch_id', 'tecs_version_id',
'dvs_config_type', 'dvsc_cpus', 'dvsp_cpus', 'dvsv_cpus',
'dvsblank_cpus', 'flow_mode', 'virtio_queue_size',
'dvs_config_desc', 'discover_mode', 'system', 'cpu',
'memory', 'disks', 'devices', 'pci')
CREATE_PARAMS = ('id', 'name', 'description', 'resource_type', 'dmi_uuid',
'role', 'cluster', 'os_version', 'os_status',
'interfaces', 'is_deployment', 'status', 'ipmi_user',
'ipmi_passwd', 'ipmi_addr', 'ip', 'status', 'user',
'passwd', 'cluster_id',
'passwd', 'hwm_id', 'hwm_ip', 'cluster_id',
'vcpu_pin_set', 'dvs_high_cpuset', 'pci_high_cpuset',
'os_cpus', 'dvs_cpus', 'config_set_id', 'system',
'cpu', 'memory', 'disks', 'devices', 'pci')
'os_cpus', 'dvs_cpus', 'config_set_id', 'root_disk',
'version_patch_id', 'tecs_version_id', 'dvs_config_type',
'discover_mode', 'system', 'cpu', 'memory', 'disks', 'devices',
'pci')
DEFAULT_PAGE_SIZE = 200
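
These tuples are the client-side whitelists: the commit adds hwm_id/hwm_ip and the discover-related fields (discover_state, discover_mode, root_disk, ...), so only listed keys survive into a host add/update call. A hypothetical illustration of how such a whitelist is typically applied, using the UPDATE_PARAMS tuple above (the real filtering code in daisyclient is outside this hunk):

def filter_params(fields, allowed):
    # Keep only whitelisted keys before sending them to the API.
    return dict((k, v) for k, v in fields.items() if k in allowed)

fields = {'name': 'node-1', 'discover_mode': 'SSH', 'bogus_key': 1}
print(filter_params(fields, UPDATE_PARAMS))
# {'name': 'node-1', 'discover_mode': 'SSH'}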

View File

@@ -60,6 +60,8 @@ def _daisy_show(daisy, max_column_width=80):
help='node name to be added.')
@utils.arg('description', metavar='<DESCRIPTION>',
help='node description to be added.')
@utils.arg('discover_mode', metavar='<DISCOVER_MODE>',
help='node discover mode (PXE/SSH) to be added.')
@utils.arg('--resource-type', metavar='<RESOURCE_TYPE>',
help='node resource type to be added, supported types are \
"baremetal", "server" and "docker".\

View File

@@ -72,8 +72,16 @@ def api_continue():
hostname = data.pop('hostname')
else:
hostname = None
if data_name == "baremetal_source":
process.write_data_to_daisy(data, ipmi_addr, os_status, hostname)
if 'discover_mode' in data.keys():
discover_mode = data.pop('discover_mode')
else:
discover_mode = None
try:
if data_name == "baremetal_source":
process.write_data_to_daisy(data, ipmi_addr, os_status, hostname,
discover_mode)
except Exception as ex:
LOG.error("Write data to daisy failed: %s", ex)
return json.dumps(""), 200, {'Content-Type': 'applications/json'}

View File

@@ -39,15 +39,17 @@ _POWER_CHECK_PERIOD = 5
_POWER_OFF_CHECK_PERIOD = 5
def write_data_to_daisy(node_info, ipmi_addr, os_status=None, hostname=None):
def write_data_to_daisy(node_info, ipmi_addr, os_status=None, hostname=None,
discover_mode=None):
daisy_client = utils.get_daisy_client()
daisy_data = format_node_info_for_daisy_client(node_info, ipmi_addr,
os_status, hostname)
os_status, hostname,
discover_mode)
daisy_client.hosts.add(**daisy_data)
def format_node_info_for_daisy_client(node_info, ipmi_addr,
os_status, hostname):
def format_node_info_for_daisy_client(node_info, ipmi_addr, os_status,
hostname, discover_mode):
interface_list = []
interfaces = node_info.get('interfaces', {})
for value in interfaces.values():
@@ -65,18 +67,17 @@ def format_node_info_for_daisy_client(node_info, ipmi_addr,
'current_speed': value['current_speed'],
'netmask': value['netmask'],
'type': value['type'],
'slaves': slaves,
'slaves': slaves
}
interface_list.append(interface)
min_mac = find_min_mac_in_node_info(node_info)
unique_mac = ''.join(min_mac.split(":"))
daisy_data = {'description': 'default',
'name': unique_mac,
'name': '',
'ipmi_addr': ipmi_addr,
'interfaces': interface_list,
'os_status': 'init',
'dmi_uuid': node_info.get('system').get('uuid', None),
'discover_mode': discover_mode if discover_mode else 'PXE',
'system': node_info.get('system'),
'cpu': node_info.get('cpu'),
'memory': node_info.get('memory'),
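
format_node_info_for_daisy_client now threads discover_mode into the record, defaulting to 'PXE' when the reporter sends nothing (older scripts, or PXE boots that never set it); note also that 'name' is now sent empty instead of the MAC-derived unique_mac. The default logic, isolated:

def resolve_discover_mode(discover_mode):
    # Mirrors the conditional expression above: any falsy value -> 'PXE'.
    return discover_mode if discover_mode else 'PXE'

assert resolve_discover_mode('SSH') == 'SSH'
assert resolve_discover_mode(None) == 'PXE'
assert resolve_discover_mode('') == 'PXE'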

View File

@@ -16,6 +16,7 @@ from oslo_log import log as logging
from tempest import config
import tempest.test
from daisyclient.v1 import client as daisy_client
from tempest.api.daisy.v1.fake.logical_network_fake import FakeDiscoverHosts
CONF = config.CONF
@@ -459,15 +460,6 @@ class BaseDaisyTest(tempest.test.BaseTestCase):
disk_array.service_disk_detail(service_disk_id)
return service_disk_detail
@classmethod
def _clean_all_physical_node(self):
physical_node_list_generator = self.ironic_client.physical_node.list()
physical_node_list = [physical_node for physical_node
in physical_node_list_generator]
if physical_node_list:
for physical_node in physical_node_list:
self.ironic_client.physical_node.delete(physical_node.uuid)
@classmethod
def template_add(self, **template):
template = self.daisy_client.template.add(**template)
@@ -537,3 +529,7 @@ class BaseDaisyTest(tempest.test.BaseTestCase):
def delete_host_template(self, **kwargs):
template = self.daisy_client.template.delete_host_template(**kwargs)
return template
@classmethod
def add_fake_node(cls, num):
return cls.daisy_client.hosts.add(**FakeDiscoverHosts.daisy_data[num])

View File

@@ -187,7 +187,7 @@ class TecsClusterTest(base.BaseDaisyTest):
self.delete_cluster(cluster_info.id)
def test_add_cluster_with_nodes(self):
host_info = self.add_host(**self.host_meta)
host_info = self.add_fake_node(0)
nodes = []
nodes.append(host_info.id)
self.cluster_meta1['nodes'] = nodes
@@ -222,7 +222,7 @@ class TecsClusterTest(base.BaseDaisyTest):
self.delete_cluster(cluster_info.id)
def test_update_cluster_with_nodes(self):
host_info = self.add_host(**self.host_meta)
host_info = self.add_fake_node(0)
nodes = []
nodes.append(host_info.id)
self.private_network_add()