Merge "add template_extra function"
This commit is contained in:
commit
1fe1b5d854
@ -20,16 +20,20 @@ import os
|
||||
import subprocess
|
||||
import time
|
||||
import re
|
||||
import commands
|
||||
import socket
|
||||
import netaddr
|
||||
from oslo_log import log as logging
|
||||
from webob.exc import HTTPBadRequest
|
||||
from webob.exc import HTTPForbidden
|
||||
from webob.exc import HTTPNotFound
|
||||
from webob.exc import HTTPForbidden
|
||||
from daisy import i18n
|
||||
from daisy.common import utils
|
||||
|
||||
from daisy.common import utils
|
||||
from daisy.common import exception
|
||||
import daisy.registry.client.v1.api as registry
|
||||
import copy
|
||||
import fcntl
|
||||
import json
|
||||
|
||||
STR_MASK = '*' * 8
|
||||
@ -46,7 +50,7 @@ zenic_backend_name = "zenic"
|
||||
proton_backend_name = "proton"
|
||||
kolla_backend_name = "kolla"
|
||||
os_install_start_time = 0.0
|
||||
|
||||
cluster_list_file = "/var/lib/daisy/cluster-list"
|
||||
BACKEND_STATE = {
|
||||
'INIT': 'init',
|
||||
'INSTALLING': 'installing',
|
||||
@ -101,20 +105,72 @@ service_map = {
|
||||
}
|
||||
|
||||
|
||||
def list_2_file(f, cluster_list):
|
||||
f.seek(0)
|
||||
for cluster_id in cluster_list:
|
||||
f.write(cluster_id+"\n")
|
||||
|
||||
|
||||
def file_2_list(f, cluster_list):
|
||||
f.seek(0)
|
||||
cluster_ids = f.readlines()
|
||||
for cluster_id in cluster_ids:
|
||||
cluster_list.append(cluster_id.strip("\n"))
|
||||
|
||||
|
||||
def cluster_list_add(cluster_id):
|
||||
cluster_list = []
|
||||
with open(cluster_list_file, "a+") as f:
|
||||
fcntl.flock(f, fcntl.LOCK_EX)
|
||||
file_2_list(f, cluster_list)
|
||||
cluster_list.append(cluster_id)
|
||||
f.seek(0)
|
||||
f.truncate()
|
||||
list_2_file(f, cluster_list)
|
||||
fcntl.flock(f, fcntl.LOCK_UN)
|
||||
|
||||
|
||||
def cluster_list_delete(cluster_id):
|
||||
cluster_list = []
|
||||
with open(cluster_list_file, "a+") as f:
|
||||
fcntl.flock(f, fcntl.LOCK_EX)
|
||||
file_2_list(f, cluster_list)
|
||||
cluster_list.remove(cluster_id)
|
||||
f.seek(0)
|
||||
f.truncate()
|
||||
list_2_file(f, cluster_list)
|
||||
fcntl.flock(f, fcntl.LOCK_UN)
|
||||
|
||||
|
||||
def in_cluster_list(cluster_id):
|
||||
cluster_list = []
|
||||
with open(cluster_list_file, "a+") as f:
|
||||
fcntl.flock(f, fcntl.LOCK_EX)
|
||||
file_2_list(f, cluster_list)
|
||||
fcntl.flock(f, fcntl.LOCK_UN)
|
||||
return cluster_id in cluster_list
|
||||
|
||||
|
||||
def cluster_list_get():
|
||||
cluster_list = []
|
||||
with open(cluster_list_file, "a+") as f:
|
||||
fcntl.flock(f, fcntl.LOCK_EX)
|
||||
file_2_list(f, cluster_list)
|
||||
fcntl.flock(f, fcntl.LOCK_UN)
|
||||
return cluster_list
|
||||
|
||||
|
||||
def subprocess_call(command, file=None):
|
||||
if file:
|
||||
return_code = subprocess.call(command,
|
||||
shell=True,
|
||||
stdout=file,
|
||||
stderr=file)
|
||||
else:
|
||||
return_code = subprocess.call(command,
|
||||
shell=True,
|
||||
stdout=open('/dev/null', 'w'),
|
||||
stderr=subprocess.STDOUT)
|
||||
if return_code != 0:
|
||||
msg = "execute '%s' failed by subprocess call." % command
|
||||
raise exception.SubprocessCmdFailed(msg)
|
||||
try:
|
||||
subprocess.check_output(command,
|
||||
shell=True,
|
||||
stderr=subprocess.STDOUT)
|
||||
except subprocess.CalledProcessError as e:
|
||||
if file:
|
||||
file.write(e.output.strip())
|
||||
msg = "execute '%s' failed by subprocess call, "\
|
||||
"error message: %s." % (command, e.output.strip())
|
||||
raise exception.SubprocessCmdFailed(message=msg)
|
||||
|
||||
|
||||
def get_host_detail(req, host_id):
|
||||
@ -146,6 +202,15 @@ def get_cluster_roles_detail(req, cluster_id):
|
||||
return roles
|
||||
|
||||
|
||||
def get_cluster_hosts_list(req, cluster_id):
|
||||
try:
|
||||
params = {'cluster_id': cluster_id}
|
||||
hosts = registry.get_cluster_hosts(req.context, **params)
|
||||
except exception.Invalid as e:
|
||||
raise HTTPBadRequest(explanation=e.msg, request=req)
|
||||
return hosts
|
||||
|
||||
|
||||
def get_hosts_of_role(req, role_id):
|
||||
try:
|
||||
hosts = registry.get_role_host_metadata(req.context, role_id)
|
||||
@ -154,6 +219,14 @@ def get_hosts_of_role(req, role_id):
|
||||
return hosts
|
||||
|
||||
|
||||
def get_roles_of_host(req, host_id):
|
||||
try:
|
||||
roles = registry.get_host_roles_by_host_id(req.context, host_id)
|
||||
except exception.Invalid as e:
|
||||
raise HTTPBadRequest(explanation=e.msg, request=req)
|
||||
return roles
|
||||
|
||||
|
||||
def get_role_detail(req, role_id):
|
||||
try:
|
||||
role = registry.get_role_metadata(req.context, role_id)
|
||||
@ -181,9 +254,26 @@ def update_role(req, role_id, role_meta):
|
||||
raise HTTPBadRequest(explanation=e.msg, request=req)
|
||||
|
||||
|
||||
def update_role_host(req, role_id, role_host):
|
||||
def update_role_host(req, host_role_id, role_host):
|
||||
try:
|
||||
registry.update_role_host_metadata(req.context, role_id, role_host)
|
||||
registry.update_role_host_metadata(
|
||||
req.context, host_role_id, role_host)
|
||||
except exception.Invalid as e:
|
||||
raise HTTPBadRequest(explanation=e.msg, request=req)
|
||||
|
||||
|
||||
def get_role_hosts(req, role_id):
|
||||
try:
|
||||
role_hosts = registry.get_role_host_metadata(
|
||||
req.context, role_id)
|
||||
except exception.Invalid as e:
|
||||
raise HTTPBadRequest(explanation=e.msg, request=req)
|
||||
return role_hosts
|
||||
|
||||
|
||||
def delete_role_hosts(req, role_id):
|
||||
try:
|
||||
registry.delete_role_host_metadata(req.context, role_id)
|
||||
except exception.Invalid as e:
|
||||
raise HTTPBadRequest(explanation=e.msg, request=req)
|
||||
|
||||
@ -206,13 +296,6 @@ def set_role_status_and_progress(req, cluster_id, opera, status,
|
||||
update_role_host(req, role_host['id'], status)
|
||||
|
||||
|
||||
def delete_role_hosts(req, role_id):
|
||||
try:
|
||||
registry.delete_role_host_metadata(req.context, role_id)
|
||||
except exception.Invalid as e:
|
||||
raise HTTPBadRequest(explanation=e.msg, request=req)
|
||||
|
||||
|
||||
def get_cluster_networks_detail(req, cluster_id):
|
||||
try:
|
||||
networks = registry.get_networks_detail(req.context, cluster_id)
|
||||
@ -433,40 +516,39 @@ def calc_host_iqn(min_mac):
|
||||
return iqn
|
||||
|
||||
|
||||
def _get_cluster_network(cluster_networks, network_type):
|
||||
network = [cn for cn in cluster_networks if cn['name'] in network_type]
|
||||
def _get_cluster_network(cluster_networks, network_name):
|
||||
network = [cn for cn in cluster_networks if cn['name'] == network_name]
|
||||
if not network or not network[0]:
|
||||
msg = "network %s is not exist" % (network_type)
|
||||
msg = "network %s is not exist" % (network_name)
|
||||
raise exception.InvalidNetworkConfig(msg)
|
||||
else:
|
||||
return network[0]
|
||||
|
||||
|
||||
def get_host_interface_by_network(host_detail, network_type):
|
||||
def get_host_interface_by_network(host_detail, network_name):
|
||||
host_detail_info = copy.deepcopy(host_detail)
|
||||
interface_list = [hi for hi in host_detail_info['interfaces']
|
||||
for assigned_network in hi['assigned_networks']
|
||||
if assigned_network and
|
||||
network_type == assigned_network['name']]
|
||||
network_name == assigned_network['name']]
|
||||
interface = {}
|
||||
if interface_list:
|
||||
interface = interface_list[0]
|
||||
if not interface:
|
||||
|
||||
if not interface and 'MANAGEMENT' == network_name:
|
||||
msg = "network %s of host %s is not exist" % (
|
||||
network_type, host_detail_info['id'])
|
||||
network_name, host_detail_info['id'])
|
||||
raise exception.InvalidNetworkConfig(msg)
|
||||
|
||||
return interface
|
||||
|
||||
|
||||
def get_host_network_ip(req, host_detail, cluster_networks, network_name):
|
||||
interface_network_ip = ''
|
||||
host_interface = get_host_interface_by_network(host_detail, network_name)
|
||||
if host_interface:
|
||||
network = _get_cluster_network(cluster_networks, network_name)
|
||||
assigned_network = get_assigned_network(req,
|
||||
host_interface['id'],
|
||||
network['id'])
|
||||
interface_network_ip = assigned_network['ip']
|
||||
for host_interface in host_detail.get('interfaces', []):
|
||||
for assigned_network in host_interface.get('assigned_networks', []):
|
||||
if assigned_network.get('name') == network_name:
|
||||
return assigned_network.get('ip')
|
||||
|
||||
if not interface_network_ip and 'MANAGEMENT' == network_name:
|
||||
msg = "%s network ip of host %s can't be empty" % (
|
||||
@ -512,7 +594,7 @@ def sort_interfaces_by_pci(networks, host_detail):
|
||||
|
||||
for interface in interfaces:
|
||||
if interface.get('name') not in slaves_name_list:
|
||||
vlan_id_len_list = [len(network['vlan_id'])
|
||||
vlan_id_len_list = [len(str(network['vlan_id']))
|
||||
for assigned_network in interface.get(
|
||||
'assigned_networks', [])
|
||||
for network in networks
|
||||
@ -597,27 +679,387 @@ def get_ctl_ha_nodes_min_mac(req, cluster_id):
|
||||
'''
|
||||
ctl_ha_nodes_min_mac = {}
|
||||
roles = get_cluster_roles_detail(req, cluster_id)
|
||||
cluster_networks =\
|
||||
get_cluster_networks_detail(req, cluster_id)
|
||||
for role in roles:
|
||||
if role['deployment_backend'] != tecs_backend_name:
|
||||
continue
|
||||
role_hosts = get_hosts_of_role(req, role['id'])
|
||||
if role['name'] == "CONTROLLER_HA":
|
||||
role_hosts = get_hosts_of_role(req, role['id'])
|
||||
for role_host in role_hosts:
|
||||
# host has installed tecs are exclusive
|
||||
if (role_host['status'] == BACKEND_STATE['ACTIVE'] or
|
||||
role_host['status'] == BACKEND_STATE['UPDATING'] or
|
||||
role_host['status'] == BACKEND_STATE['UPDATE_FAILED']):
|
||||
continue
|
||||
host_detail = get_host_detail(req,
|
||||
role_host['host_id'])
|
||||
host_name = host_detail['name']
|
||||
if role['name'] == "CONTROLLER_HA":
|
||||
min_mac = utils.get_host_min_mac(host_detail['interfaces'])
|
||||
ctl_ha_nodes_min_mac[host_name] = min_mac
|
||||
min_mac = utils.get_host_min_mac(host_detail['interfaces'])
|
||||
ctl_ha_nodes_min_mac[host_name] = min_mac
|
||||
return ctl_ha_nodes_min_mac
|
||||
|
||||
|
||||
def update_db_host_status(req, host_id, host_status, version_id=None,
|
||||
version_patch_id=None):
|
||||
"""
|
||||
Update host status and intallation progress to db.
|
||||
:return:
|
||||
"""
|
||||
try:
|
||||
host_meta = {}
|
||||
if host_status.get('os_progress', None):
|
||||
host_meta['os_progress'] = host_status['os_progress']
|
||||
if host_status.get('os_status', None):
|
||||
host_meta['os_status'] = host_status['os_status']
|
||||
if host_status.get('messages', None):
|
||||
host_meta['messages'] = host_status['messages']
|
||||
if host_status.has_key('tecs_version_id'):
|
||||
host_meta['tecs_version_id'] = host_status['tecs_version_id']
|
||||
if version_id:
|
||||
host_meta['os_version_id'] = version_id
|
||||
if version_patch_id:
|
||||
host_meta['version_patch_id'] = version_patch_id
|
||||
hostinfo = registry.update_host_metadata(req.context,
|
||||
host_id,
|
||||
host_meta)
|
||||
return hostinfo
|
||||
except exception.Invalid as e:
|
||||
raise HTTPBadRequest(explanation=e.msg, request=req)
|
||||
|
||||
|
||||
def get_local_deployment_ip(tecs_deployment_ips):
|
||||
(status, output) = commands.getstatusoutput('ifconfig')
|
||||
netcard_pattern = re.compile('\S*: ')
|
||||
ip_str = '([0-9]{1,3}\.){3}[0-9]{1,3}'
|
||||
pattern = re.compile(ip_str)
|
||||
nic_ip = {}
|
||||
for netcard in re.finditer(netcard_pattern, str(output)):
|
||||
nic_name = netcard.group().split(': ')[0]
|
||||
if nic_name == "lo":
|
||||
continue
|
||||
ifconfig_nic_cmd = "ifconfig %s" % nic_name
|
||||
(status, output) = commands.getstatusoutput(ifconfig_nic_cmd)
|
||||
if status:
|
||||
continue
|
||||
ip = pattern.search(str(output))
|
||||
if ip and ip.group() != "127.0.0.1":
|
||||
nic_ip[nic_name] = ip.group()
|
||||
|
||||
deployment_ip = ''
|
||||
for nic in nic_ip.keys():
|
||||
if nic_ip[nic] in tecs_deployment_ips:
|
||||
deployment_ip = nic_ip[nic]
|
||||
break
|
||||
return deployment_ip
|
||||
|
||||
|
||||
def whether_insl_backends(req, host_ids_failed):
|
||||
# after os installed, host_ids_failed are ids of host installed failed
|
||||
# if host installed failed is CONTROLLER_LB host or CONTROLLER_HA host
|
||||
# continue_installing_backends is false ,stop installing backends
|
||||
continue_installing_backends = True
|
||||
if not host_ids_failed:
|
||||
return continue_installing_backends
|
||||
for host_id_failed in host_ids_failed:
|
||||
host_failed_info = get_host_detail(req, host_id_failed)
|
||||
roles_of_host = host_failed_info['role']
|
||||
if "CONTROLLER_HA" in roles_of_host or "CONTROLLER_LB" \
|
||||
in roles_of_host:
|
||||
continue_installing_backends = False
|
||||
return continue_installing_backends
|
||||
return continue_installing_backends
|
||||
|
||||
|
||||
def whether_insl_tecs_aft_ping(unreached_hosts, ha_ip_set,
|
||||
lb_ip_set):
|
||||
continue_installing_tecs = True
|
||||
ha_ip_set = set(ha_ip_set)
|
||||
lb_ip_set = set(lb_ip_set)
|
||||
controller_ips = (ha_ip_set | lb_ip_set)
|
||||
if not unreached_hosts:
|
||||
return continue_installing_tecs
|
||||
for unreached_host in unreached_hosts:
|
||||
if unreached_host in controller_ips:
|
||||
continue_installing_tecs = False
|
||||
return continue_installing_tecs
|
||||
return continue_installing_tecs
|
||||
|
||||
|
||||
def get_management_ip(host_detail, is_throw_exception=True):
|
||||
host_management_ip = ''
|
||||
for interface in host_detail['interfaces']:
|
||||
if ('assigned_networks' in interface and
|
||||
interface['assigned_networks']):
|
||||
for as_network in interface['assigned_networks']:
|
||||
if ((as_network.get('name', '') == 'MANAGEMENT'
|
||||
or as_network.get('network_type', '') == 'MANAGEMENT')
|
||||
and 'ip' in as_network):
|
||||
host_management_ip = as_network['ip']
|
||||
|
||||
if not host_management_ip and is_throw_exception:
|
||||
msg = "Can't find management ip for host %s"\
|
||||
% host_detail['id']
|
||||
LOG.error(msg)
|
||||
raise HTTPBadRequest(explanation=msg)
|
||||
return host_management_ip
|
||||
|
||||
|
||||
def _judge_ssh_host(req, host_id):
|
||||
ssh_host_flag = False
|
||||
kwargs = {}
|
||||
nodes = registry.get_hosts_detail(req.context, **kwargs)
|
||||
for node in nodes:
|
||||
if node.get("hwm_id"):
|
||||
check_discover_state_with_hwm(req, node)
|
||||
else:
|
||||
check_discover_state_with_no_hwm(req, node)
|
||||
|
||||
if node['discover_state'] and \
|
||||
'SSH:DISCOVERY_SUCCESSFUL' in node['discover_state']:
|
||||
if host_id == node['id']:
|
||||
ssh_host_flag = True
|
||||
break
|
||||
return ssh_host_flag
|
||||
|
||||
|
||||
def check_discover_state_with_hwm(req, node, is_detail=False):
|
||||
node['discover_state'] = None
|
||||
if node.get("discover_mode"):
|
||||
node['discover_state'] = node['discover_mode'] + \
|
||||
":DISCOVERY_SUCCESSFUL"
|
||||
return node
|
||||
if is_detail:
|
||||
host_interfaces = node.get('interfaces')
|
||||
else:
|
||||
host_interfaces = registry.get_host_interface_by_host_id(
|
||||
req.context, node.get('id'))
|
||||
if host_interfaces:
|
||||
mac_list = [interface['mac'] for interface in host_interfaces if
|
||||
interface.get('mac')]
|
||||
if mac_list:
|
||||
min_mac = min(mac_list)
|
||||
pxe_discover_host = _get_discover_host_by_mac(req, min_mac)
|
||||
if pxe_discover_host:
|
||||
if pxe_discover_host.get('ip'):
|
||||
node['discover_state'] = \
|
||||
"SSH:" + pxe_discover_host.get('status')
|
||||
else:
|
||||
node['discover_state'] = \
|
||||
"PXE:" + pxe_discover_host.get('status')
|
||||
|
||||
return node
|
||||
|
||||
|
||||
def check_discover_state_with_no_hwm(req, node, is_detail=False):
|
||||
node['discover_state'] = None
|
||||
if node.get("discover_mode"):
|
||||
node['discover_state'] = node['discover_mode'] + \
|
||||
":DISCOVERY_SUCCESSFUL"
|
||||
return node
|
||||
if is_detail:
|
||||
host_interfaces = node.get('interfaces')
|
||||
else:
|
||||
host_interfaces = registry.get_host_interface_by_host_id(
|
||||
req.context, node.get('id'))
|
||||
if host_interfaces:
|
||||
ip_list = [interface['ip'] for interface in host_interfaces if
|
||||
interface['ip']]
|
||||
for ip in ip_list:
|
||||
ssh_discover_host = _get_discover_host_filter_by_ip(
|
||||
req, ip)
|
||||
if ssh_discover_host:
|
||||
node['discover_state'] = \
|
||||
"SSH:" + ssh_discover_host.get('status')
|
||||
|
||||
return node
|
||||
|
||||
|
||||
def _get_discover_host_by_mac(req, host_mac):
|
||||
params = dict()
|
||||
discover_hosts = \
|
||||
registry.get_discover_hosts_detail(req.context, **params)
|
||||
LOG.info("%s" % discover_hosts)
|
||||
for host in discover_hosts:
|
||||
if host.get('mac') == host_mac:
|
||||
return host
|
||||
return
|
||||
|
||||
|
||||
def _get_discover_host_filter_by_ip(req, host_ip):
|
||||
params = {}
|
||||
discover_hosts = \
|
||||
registry.get_discover_hosts_detail(req.context, **params)
|
||||
LOG.debug("%s" % discover_hosts)
|
||||
for host in discover_hosts:
|
||||
if host.get('ip') == host_ip:
|
||||
return host
|
||||
return
|
||||
|
||||
|
||||
def add_ssh_host_to_cluster_and_assigned_network(req, cluster_id, host_id):
|
||||
if cluster_id:
|
||||
host_list = []
|
||||
father_vlan_list = []
|
||||
discover_successful = 0
|
||||
host_info = get_host_detail(req, host_id)
|
||||
host_status = host_info.get('status',None)
|
||||
if host_status != 'init':
|
||||
interfac_meta_list=host_info.get('interfaces',None)
|
||||
for interface_info in interfac_meta_list:
|
||||
assigned_networks = interface_info.get\
|
||||
('assigned_networks', None)
|
||||
if assigned_networks:
|
||||
discover_successful = 1
|
||||
if not discover_successful:
|
||||
host_list.append(host_id)
|
||||
|
||||
if host_list:
|
||||
params = {'filters': {'cluster_id': cluster_id}}
|
||||
networks = registry.get_networks_detail(req.context,
|
||||
cluster_id, **params)
|
||||
father_vlan_list=check_vlan_nic_and_join_vlan_network\
|
||||
(req, cluster_id, host_list, networks)
|
||||
check_bond_or_ether_nic_and_join_network\
|
||||
(req, cluster_id, host_list, networks, father_vlan_list)
|
||||
|
||||
|
||||
def check_vlan_nic_and_join_vlan_network(req, cluster_id,
|
||||
host_list, networks):
|
||||
father_vlan_list = []
|
||||
for host_id in host_list:
|
||||
host_meta_detail = get_host_detail(req, host_id)
|
||||
if host_meta_detail.has_key('interfaces'):
|
||||
interfac_list = host_meta_detail.get('interfaces',None)
|
||||
for interface_info in interfac_list:
|
||||
host_ip = interface_info.get('ip',None)
|
||||
if interface_info['type'] == 'vlan' and host_ip:
|
||||
check_ip_if_valid = \
|
||||
_checker_the_ip_or_hostname_valid(host_ip)
|
||||
if not check_ip_if_valid:
|
||||
msg = "Error:The %s is not the right ip!" % host_ip
|
||||
LOG.error(msg)
|
||||
raise exception.Forbidden(msg)
|
||||
nic_name = interface_info['name'].split('.')[0]
|
||||
vlan_id = interface_info['name'].split('.')[1]
|
||||
for network in networks:
|
||||
if network['network_type'] in ['DATAPLANE',
|
||||
'EXTERNAL']:
|
||||
continue
|
||||
network_cidr = network.get('cidr', None)
|
||||
if network_cidr:
|
||||
ip_in_cidr = \
|
||||
utils.is_ip_in_cidr(host_ip,
|
||||
network['cidr'])
|
||||
if vlan_id == network['vlan_id']\
|
||||
and ip_in_cidr:
|
||||
father_vlan_list.append(
|
||||
{nic_name: {'name': network['name'],
|
||||
'ip': host_ip}})
|
||||
interface_info['assigned_networks'].\
|
||||
append({'name': network['name'],
|
||||
'ip': host_ip})
|
||||
LOG.info("add the nic %s of the host "
|
||||
"%s to assigned_network %s" %
|
||||
(interface_info['name'],
|
||||
host_id,
|
||||
interface_info
|
||||
['assigned_networks']))
|
||||
elif vlan_id == network['vlan_id'] \
|
||||
and not ip_in_cidr:
|
||||
msg = "The vlan of nic %s is the same " \
|
||||
"as network %s, but the ip of nic " \
|
||||
"is not in the cidr range." % \
|
||||
(nic_name, network['name'])
|
||||
LOG.error(msg)
|
||||
raise HTTPForbidden(explanation=msg)
|
||||
else:
|
||||
msg = "There is no cidr in network " \
|
||||
"%s" % network['name']
|
||||
LOG.error(msg)
|
||||
raise HTTPForbidden(explanation=msg)
|
||||
return father_vlan_list
|
||||
|
||||
|
||||
def _checker_the_ip_or_hostname_valid(ip_str):
|
||||
try:
|
||||
ip_lists = socket.gethostbyname_ex(ip_str)
|
||||
return True
|
||||
except Exception:
|
||||
if netaddr.IPAddress(ip_str).version == 6:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
def check_bond_or_ether_nic_and_join_network(req, cluster_id, host_list, networks, father_vlan_list):
|
||||
for host_id in host_list:
|
||||
host_info = get_host_detail(req, host_id)
|
||||
if host_info.has_key('interfaces'):
|
||||
update_host_interface = 0
|
||||
interfac_meta_list = host_info.get('interfaces',None)
|
||||
for interface_info in interfac_meta_list:
|
||||
update_flag = 0
|
||||
host_info_ip = interface_info.get('ip',None)
|
||||
if interface_info['type'] != 'vlan':
|
||||
nic_name = interface_info['name']
|
||||
for nic in father_vlan_list:
|
||||
if nic.keys()[0] == nic_name:
|
||||
update_flag = 1
|
||||
update_host_interface = 1
|
||||
interface_info['assigned_networks']\
|
||||
.append(nic.values()[0])
|
||||
if update_flag:
|
||||
continue
|
||||
if host_info_ip:
|
||||
check_ip_if_valid = \
|
||||
_checker_the_ip_or_hostname_valid(host_info_ip)
|
||||
if not check_ip_if_valid:
|
||||
msg = "Error:The %s is not the right ip!"\
|
||||
% host_info_ip
|
||||
LOG.error(msg)
|
||||
raise exception.Forbidden(msg)
|
||||
for network in networks:
|
||||
if network['network_type'] in ['DATAPLANE',
|
||||
'EXTERNAL']:
|
||||
continue
|
||||
if network.get('cidr', None):
|
||||
ip_in_cidr = utils.is_ip_in_cidr\
|
||||
(host_info_ip, network['cidr'])
|
||||
if ip_in_cidr:
|
||||
vlan_id = network['vlan_id']
|
||||
if not vlan_id:
|
||||
update_host_interface = 1
|
||||
interface_info['assigned_networks'].\
|
||||
append({'name': network['name'],
|
||||
'ip': host_info_ip})
|
||||
LOG.info("add the nic %s of the "
|
||||
"host %s to "
|
||||
"assigned_network %s" %
|
||||
(nic_name,
|
||||
host_id,
|
||||
interface_info
|
||||
['assigned_networks']))
|
||||
else:
|
||||
msg = ("the nic %s of ip %s is in "
|
||||
"the %s cidr range,but the "
|
||||
"network vlan id is %s " %
|
||||
(nic_name,
|
||||
host_info_ip,
|
||||
network['name'], vlan_id))
|
||||
LOG.error(msg)
|
||||
raise HTTPForbidden(explation=msg)
|
||||
else:
|
||||
msg = "There is no cidr in network " \
|
||||
"%s" % network['name']
|
||||
LOG.error(msg)
|
||||
raise HTTPForbidden(explanation=msg)
|
||||
|
||||
if update_host_interface:
|
||||
host_meta={}
|
||||
host_meta['cluster'] = cluster_id
|
||||
host_meta['interfaces'] = str(interfac_meta_list)
|
||||
host_meta = registry.update_host_metadata(req.context,
|
||||
host_id,
|
||||
host_meta)
|
||||
LOG.info("add the host %s join the cluster %s and"
|
||||
" assigned_network successful" %
|
||||
(host_id, cluster_id))
|
||||
|
||||
|
||||
def build_pxe_server(eth_name, ip_address, build_pxe, net_mask,
|
||||
client_ip_begin, client_ip_end):
|
||||
"""build pxe server."""
|
||||
@ -637,7 +1079,7 @@ def build_pxe_server(eth_name, ip_address, build_pxe, net_mask,
|
||||
chmod 755 /tftpboot -R"
|
||||
try:
|
||||
obj = subprocess.Popen(cmd, stdin=_PIPE, stdout=_PIPE,
|
||||
stderr=_PIPE, shell=False)
|
||||
stderr=_PIPE, shell=True)
|
||||
obj.communicate()
|
||||
except Exception as e:
|
||||
msg = "build_pxe_server error: %s" % e
|
||||
@ -666,7 +1108,7 @@ def set_boot_or_power_state(user, passwd, addr, action):
|
||||
_PIPE = subprocess.PIPE
|
||||
try:
|
||||
obj = subprocess.Popen(cmd, stdin=_PIPE, stdout=_PIPE,
|
||||
stderr=_PIPE, shell=True)
|
||||
stderr=_PIPE, shell=False)
|
||||
obj.communicate()
|
||||
except Exception as e:
|
||||
msg = "%s set_boot_or_power_state error: %s" % (addr, e)
|
||||
|
@ -37,8 +37,8 @@ def load_install_os_driver(os_install_type):
|
||||
|
||||
LOG.info(_("Loading os driver '%s'") % os_installation_driver)
|
||||
try:
|
||||
driver = importutils.import_object_ns(
|
||||
'daisy.api.backends.osinstall', os_installation_driver)
|
||||
driver = importutils.import_module(
|
||||
'daisy.api.backends.osinstall.%s' % os_installation_driver)
|
||||
return driver
|
||||
except ImportError:
|
||||
LOG.exception(
|
||||
|
@ -17,13 +17,15 @@
|
||||
/install endpoint for daisy API
|
||||
"""
|
||||
import copy
|
||||
import subprocess
|
||||
import time
|
||||
|
||||
import re
|
||||
import commands
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
from webob.exc import HTTPBadRequest
|
||||
from daisy.api import common
|
||||
|
||||
import threading
|
||||
from daisy import i18n
|
||||
|
||||
from daisy.common import exception
|
||||
@ -31,7 +33,6 @@ from daisy.common import utils
|
||||
import daisy.registry.client.v1.api as registry
|
||||
import daisy.api.backends.common as daisy_cmn
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
_ = i18n._
|
||||
_LE = i18n._LE
|
||||
@ -168,7 +169,7 @@ def _get_network_plat(req, host_config, cluster_networks, dhcp_mac):
|
||||
network_name = assigned_network['name']
|
||||
cluster_network = [
|
||||
network for network in cluster_networks
|
||||
if network['name'] in network_name][0]
|
||||
if network['name'] == network_name][0]
|
||||
alias.append(cluster_network['alias'])
|
||||
# convert cidr to netmask
|
||||
cidr_to_ip = ""
|
||||
@ -239,10 +240,14 @@ def get_cluster_hosts_config(req, cluster_id):
|
||||
service_disks = daisy_cmn.get_service_disk_list(
|
||||
req, {'role_id': role['id']})
|
||||
for service_disk in service_disks:
|
||||
if service_disk['disk_location'] == 'local' and\
|
||||
service_disk['service'] == 'mongodb':
|
||||
host_detail['mongodb_lv_size'] = service_disk['size']
|
||||
break
|
||||
if service_disk['disk_location'] == 'local' \
|
||||
and role['name'] in host_detail['role']:
|
||||
if service_disk['service'] == 'mongodb':
|
||||
host_detail['mongodb_lv_size'] = \
|
||||
service_disk['size']
|
||||
if service_disk['service'] == 'provider':
|
||||
host_detail['provider_lv_size'] = \
|
||||
service_disk['size']
|
||||
if role_host_db_lv_size_lists:
|
||||
host_detail['db_lv_size'] = max(role_host_db_lv_size_lists)
|
||||
else:
|
||||
@ -276,21 +281,100 @@ def get_cluster_hosts_config(req, cluster_id):
|
||||
return hosts_config
|
||||
|
||||
|
||||
def update_db_host_status(req, host_id, host_status):
|
||||
"""
|
||||
Update host status and intallation progress to db.
|
||||
:return:
|
||||
"""
|
||||
def _rm_ipmi_failed_host(req, install_hosts):
|
||||
for install_host in install_hosts:
|
||||
host_info = daisy_cmn.get_host_detail(req, install_host['id'])
|
||||
if host_info["os_status"] == host_os_status["INSTALL_FAILED"]:
|
||||
install_host["os_status"] = host_os_status["INSTALL_FAILED"]
|
||||
install_hosts = [install_host for install_host in install_hosts if
|
||||
install_host["os_status"] != host_os_status[
|
||||
"INSTALL_FAILED"]]
|
||||
return install_hosts
|
||||
|
||||
|
||||
|
||||
def get_host_location_of_cisco(host_detail):
|
||||
LOG.info(_("Get location for host %s" % host_detail['id']))
|
||||
try:
|
||||
host_meta = {}
|
||||
host_meta['os_progress'] = host_status['os_progress']
|
||||
host_meta['os_status'] = host_status['os_status']
|
||||
host_meta['messages'] = host_status['messages']
|
||||
registry.update_host_metadata(req.context,
|
||||
host_id,
|
||||
host_meta)
|
||||
except exception.Invalid as e:
|
||||
raise HTTPBadRequest(explanation=e.msg, request=req)
|
||||
location_result = subprocess.check_output(
|
||||
'sshpass -p%s ssh -o StrictHostKeyChecking=no '
|
||||
'%s@10.10.100.254 "show identity ip-addr %s"' %
|
||||
(host_detail.get('ipmi_passwd'),
|
||||
host_detail.get('ipmi_user'),
|
||||
host_detail.get('ipmi_addr')), shell=True,
|
||||
stderr=subprocess.STDOUT)
|
||||
pattern = re.compile("chassis-(\d*)\/blade-(\d*)")
|
||||
res = pattern.search(location_result).groups()
|
||||
location = res[0] + '/' + res[1]
|
||||
except subprocess.CalledProcessError as e:
|
||||
LOG.info(_("Get location for %s failed!" % host_detail['id']))
|
||||
return None
|
||||
else:
|
||||
LOG.info(_("Get location for %s successfully!" % host_detail['id']))
|
||||
return location
|
||||
|
||||
|
||||
def set_pxe_start_of_cisco(host_detail):
|
||||
LOG.info(_("Set pxe start for host %s" % (host_detail['id'])))
|
||||
try:
|
||||
exc_result = subprocess.check_output(
|
||||
'sshpass -p%s ssh -o StrictHostKeyChecking=no '
|
||||
'%s@10.10.100.254 "scope service-profile server %s;'
|
||||
'set boot-policy pxe;commit-buffer"' %
|
||||
(host_detail.get('ipmi_passwd'),
|
||||
host_detail.get('ipmi_user'),
|
||||
host_detail.get('location')), shell=True,
|
||||
stderr=subprocess.STDOUT)
|
||||
except subprocess.CalledProcessError as e:
|
||||
LOG.info(_("set pxe start for %s failed!" % host_detail['id']))
|
||||
return
|
||||
else:
|
||||
LOG.info(_("set pxe start for %s successfully!" %
|
||||
host_detail['id']))
|
||||
|
||||
|
||||
def set_reboot_of_cisco(host_detail):
|
||||
LOG.info(_("Set boot from disk for host %s" % (host_detail['id'])))
|
||||
try:
|
||||
exc_result = subprocess.check_output(
|
||||
'sshpass -p%s ssh -o StrictHostKeyChecking=no '
|
||||
'%s@10.10.100.254 "scope service-profile server %s;'
|
||||
'reboot;commit-buffer"' % (host_detail.get('ipmi_passwd'),
|
||||
host_detail.get('ipmi_user'), host_detail.get('location')),
|
||||
shell=True, stderr=subprocess.STDOUT)
|
||||
except subprocess.CalledProcessError as e:
|
||||
LOG.info(_("restart for %s failed!" % host_detail['id']))
|
||||
return
|
||||
else:
|
||||
LOG.info(_("restart for %s successfully!" %
|
||||
host_detail['id']))
|
||||
|
||||
|
||||
def set_disk_start_of_cisco(host_detail):
|
||||
LOG.info(_("Set boot from disk for host %s" % (host_detail['id'])))
|
||||
try:
|
||||
exc_result = subprocess.check_output(
|
||||
'sshpass -p%s ssh -o StrictHostKeyChecking=no '
|
||||
'%s@10.10.100.254 "scope service-profile server %s;'
|
||||
'set boot-policy local-disk;commit-buffer"' %
|
||||
(host_detail.get('ipmi_passwd'), host_detail.get('ipmi_user'),
|
||||
host_detail.get('location')), shell=True,
|
||||
stderr=subprocess.STDOUT)
|
||||
except subprocess.CalledProcessError as e:
|
||||
LOG.info(_("set disk start for %s failed!" % host_detail['id']))
|
||||
return
|
||||
else:
|
||||
LOG.info(_("set disk start for %s successfully!" %
|
||||
host_detail['id']))
|
||||
|
||||
|
||||
def _get_host_interfaces(host_info):
|
||||
interfaces = host_info['interfaces']
|
||||
for interface in interfaces:
|
||||
for assigned_network in interface['assigned_networks']:
|
||||
if assigned_network['network_type'] == 'DATAPLANE':
|
||||
assigned_network['ip'] = None
|
||||
return interfaces
|
||||
|
||||
|
||||
class OSInstall():
|
||||
@ -312,37 +396,104 @@ class OSInstall():
|
||||
self.cluster_hosts_install_timeout = (
|
||||
self.max_parallel_os_num / 4 + 2) * 60 * (12 * self.time_step)
|
||||
|
||||
def _set_boot_or_power_state(self, user, passwd, addr, action):
|
||||
def _set_boot_or_power_state(self, host_detail, action):
|
||||
user = host_detail['ipmi_user']
|
||||
passwd = host_detail['ipmi_passwd']
|
||||
addr = host_detail['ipmi_addr']
|
||||
count = 0
|
||||
repeat_times = 24
|
||||
repeat_times = 5
|
||||
ipmi_result_flag = True
|
||||
stop_flag = False
|
||||
while count < repeat_times:
|
||||
rc = daisy_cmn.set_boot_or_power_state(user, passwd, addr, action)
|
||||
if rc == 0:
|
||||
LOG.info(
|
||||
_("Set %s to '%s' successfully for %s times" % (
|
||||
_("Set %s to '%s' successfully for %s times by ironic" % (
|
||||
addr, action, count + 1)))
|
||||
host_status = {'messages': "Set %s to '%s' successfully for "
|
||||
"%s times by ironic" % (
|
||||
addr, action, count + 1)}
|
||||
daisy_cmn.update_db_host_status(self.req, host_detail['id'],
|
||||
host_status)
|
||||
# make user know set successfull
|
||||
time.sleep(1)
|
||||
host_status = {
|
||||
'messages': 'Preparing for OS installation',
|
||||
'os_progress': 0}
|
||||
daisy_cmn.update_db_host_status(self.req, host_detail['id'],
|
||||
host_status)
|
||||
|
||||
# One host set 'disk' return success, but it still 'pxe'
|
||||
# mode in German site. If we have a method to confirm,
|
||||
# this can be deleted.
|
||||
if action == 'pxe' or action == 'disk':
|
||||
daisy_cmn.set_boot_or_power_state(user, passwd, addr,
|
||||
action)
|
||||
break
|
||||
else:
|
||||
count += 1
|
||||
LOG.info(
|
||||
_("Try setting %s to '%s' failed for %s times"
|
||||
_("Try setting %s to '%s' failed for %s times by ironic"
|
||||
% (addr, action, count)))
|
||||
host_status = {'messages': "Set %s to '%s' failed for "
|
||||
"%s times by ironic" % (
|
||||
addr, action, count + 1)}
|
||||
daisy_cmn.update_db_host_status(self.req, host_detail['id'],
|
||||
host_status)
|
||||
time.sleep(count * 2)
|
||||
if count >= repeat_times:
|
||||
ipmi_result_flag = False
|
||||
message = "Set %s to '%s' failed for 10 mins" % (addr, action)
|
||||
raise exception.IMPIOprationFailed(message=message)
|
||||
if host_detail.get('role', None):
|
||||
role_of_host = host_detail['role']
|
||||
else:
|
||||
role_of_host = []
|
||||
if "CONTROLLER_HA" in role_of_host or "CONTROLLER_LB" in \
|
||||
role_of_host:
|
||||
stop_flag = True
|
||||
if stop_flag:
|
||||
host_status = {
|
||||
'os_status': host_os_status['INSTALL_FAILED'],
|
||||
'os_progress': 0,
|
||||
'messages': "set %s to '%s' failed for 10 mins,is "
|
||||
"controller host,can't go on playing" % (
|
||||
addr, action)}
|
||||
daisy_cmn.update_db_host_status(self.req, host_detail['id'],
|
||||
host_status)
|
||||
message = "set %s to '%s' failed for 10 mins,is controller" \
|
||||
" host,can't go on playing" % (addr, action)
|
||||
raise exception.IMPIOprationFailed(message=message)
|
||||
else:
|
||||
LOG.info(
|
||||
_("set %s to '%s' failed for 10 mins,not controller"
|
||||
" host or no role ,go on playing" % (addr, action)))
|
||||
host_status = {
|
||||
'os_status': host_os_status['INSTALL_FAILED'],
|
||||
'os_progress': 0,
|
||||
'messages': "set %s to '%s' failed for 10 mins,not "
|
||||
"controller host or no role ,go on playing"
|
||||
% (addr, action)}
|
||||
daisy_cmn.update_db_host_status(self.req, host_detail['id'],
|
||||
host_status)
|
||||
|
||||
return ipmi_result_flag
|
||||
|
||||
def _install_os_for_baremetal(self, host_detail):
|
||||
# os_install_disk = 'sda'
|
||||
os_version_file = host_detail['os_version_file']
|
||||
# os_version_file and os_version_id only exist one at
|
||||
# same time
|
||||
if host_detail.get('os_version_file', None):
|
||||
os_version_file = host_detail['os_version_file']
|
||||
if host_detail.get('os_version_id', None):
|
||||
version_info = registry.get_version_metadata(self.req.context,
|
||||
host_detail['os_version_id'])
|
||||
if version_info:
|
||||
os_version = version_info['name']
|
||||
os_version_file = "/var/lib/daisy/" + os_version
|
||||
if os_version_file:
|
||||
test_os_version_exist = 'test -f %s' % os_version_file
|
||||
daisy_cmn.subprocess_call(test_os_version_exist)
|
||||
else:
|
||||
self.message = "no OS version file configed for host %s"\
|
||||
% host_detail['id']
|
||||
self.message = "No OS version file configed for host %s" %\
|
||||
host_detail['id']
|
||||
raise exception.NotFound(message=self.message)
|
||||
if host_detail.get('root_disk', None):
|
||||
root_disk = host_detail['root_disk']
|
||||
@ -367,7 +518,8 @@ class OSInstall():
|
||||
or host_detail['disks'][key]['name'].\
|
||||
find("mpath") != -1 \
|
||||
or host_detail['disks'][key]['name'].\
|
||||
find("spath") != -1:
|
||||
find("spath") != -1 \
|
||||
or host_detail['disks'][key]['removable'] == 'removable':
|
||||
continue
|
||||
disk_list.append(host_detail['disks'][key]['name'])
|
||||
stroage_size_str = host_detail['disks'][key]['size']
|
||||
@ -382,7 +534,9 @@ class OSInstall():
|
||||
root_pwd = 'ossdbg1'
|
||||
|
||||
isolcpus = None
|
||||
if 'os_cpus' in host_detail and host_detail['os_cpus']:
|
||||
if host_detail.get('isolcpus', None):
|
||||
isolcpus = host_detail['isolcpus']
|
||||
elif host_detail.get('os_cpus', None):
|
||||
os_cpus = utils.cpu_str_to_list(host_detail['os_cpus'])
|
||||
host_cpu = host_detail.get('cpu', {})
|
||||
if 'total' in host_cpu:
|
||||
@ -402,26 +556,37 @@ class OSInstall():
|
||||
hugepagesize = '1G'
|
||||
# tfg_patch_pkg_file = check_tfg_exist()
|
||||
|
||||
if (not host_detail['ipmi_user'] or
|
||||
not host_detail['ipmi_passwd'] or
|
||||
not host_detail['ipmi_addr']):
|
||||
self.message = "Invalid ipmi information configed for host %s" \
|
||||
% host_detail['id']
|
||||
raise exception.NotFound(message=self.message)
|
||||
host_manufacturer = host_detail['system'].get('manufacturer')
|
||||
if host_detail.get('hwm_id'):
|
||||
host_hwm_meta = {
|
||||
"hwm_ip": host_detail.get('hwm_ip'),
|
||||
"hwm_id": host_detail.get('hwm_id'),
|
||||
"boot_type": "pxe"
|
||||
}
|
||||
self.providerclient(host_hwm_meta['hwm_ip']).node.set_boot(
|
||||
**host_hwm_meta)
|
||||
elif host_manufacturer == 'Cisco Systems Inc':
|
||||
set_pxe_start_of_cisco(host_detail)
|
||||
else:
|
||||
if (not host_detail['ipmi_user'] or
|
||||
not host_detail['ipmi_passwd'] or
|
||||
not host_detail['ipmi_addr']):
|
||||
self.message = "Invalid ipmi information configed for host %s" \
|
||||
% host_detail['id']
|
||||
raise exception.NotFound(message=self.message)
|
||||
|
||||
ipmi_result_flag = self._set_boot_or_power_state(
|
||||
host_detail['ipmi_user'],
|
||||
host_detail['ipmi_passwd'],
|
||||
host_detail['ipmi_addr'],
|
||||
'pxe')
|
||||
ipmi_result_flag = self._set_boot_or_power_state(host_detail,
|
||||
'pxe')
|
||||
|
||||
host_interfaces = _get_host_interfaces(host_detail)
|
||||
kwargs = {'hostname': host_detail['name'],
|
||||
'iso_path': os_version_file,
|
||||
'group_list': host_detail['group_list'],
|
||||
# 'tfg_bin':tfg_patch_pkg_file,
|
||||
'dhcp_mac': host_detail['dhcp_mac'],
|
||||
'storage_size': disk_storage_size_m,
|
||||
'memory_size': memory_size_g,
|
||||
'interfaces': host_detail['interfaces'],
|
||||
'interfaces': host_interfaces,
|
||||
'root_lv_size': root_lv_size_m,
|
||||
'swap_lv_size': swap_lv_size_m,
|
||||
'cinder_vg_size': cinder_vg_size_m,
|
||||
@ -452,6 +617,12 @@ class OSInstall():
|
||||
else:
|
||||
kwargs['mongodb_lv_size'] = 0
|
||||
|
||||
if host_detail.get('provider_lv_size', None):
|
||||
# provider_lv_size_m = int(host_detail['provider_lv_size']) * 1024
|
||||
kwargs['provider_lv_size'] = host_detail['provider_lv_size']
|
||||
else:
|
||||
kwargs['provider_lv_size'] = 0
|
||||
|
||||
# if host_detail.has_key('nova_lv_size') and
|
||||
# host_detail['nova_lv_size']:
|
||||
if 'nova_lv_size' in host_detail and host_detail['nova_lv_size']:
|
||||
@ -467,17 +638,23 @@ class OSInstall():
|
||||
host_status = {'os_status': host_os_status['INSTALL_FAILED'],
|
||||
'os_progress': 0,
|
||||
'messages': error}
|
||||
daisy_cmn.update_db_host_status(self.req,
|
||||
host_detail['id'],
|
||||
host_status)
|
||||
daisy_cmn.update_db_host_status(self.req, host_detail['id'], host_status)
|
||||
msg = "ironic install os return failed for host %s" % \
|
||||
host_detail['id']
|
||||
raise exception.OSInstallFailed(message=msg)
|
||||
|
||||
self._set_boot_or_power_state(host_detail['ipmi_user'],
|
||||
host_detail['ipmi_passwd'],
|
||||
host_detail['ipmi_addr'],
|
||||
'reset')
|
||||
if host_detail.get('hwm_id'):
|
||||
host_hwm_meta = {
|
||||
"hwm_ip": host_detail.get('hwm_ip'),
|
||||
"hwm_id": host_detail.get('hwm_id')
|
||||
}
|
||||
self.providerclient(host_hwm_meta['hwm_ip']).node.restart(
|
||||
**host_hwm_meta)
|
||||
elif host_manufacturer == 'Cisco Systems Inc':
|
||||
set_reboot_of_cisco(host_detail)
|
||||
else:
|
||||
if ipmi_result_flag:
|
||||
self._set_boot_or_power_state(host_detail, 'reset')
|
||||
|
||||
def _begin_install_os(self, hosts_detail):
|
||||
# all hosts status is set to 'pre-install' before os installing
|
||||
@ -485,22 +662,34 @@ class OSInstall():
|
||||
host_status = {'os_status': host_os_status['PRE_INSTALL'],
|
||||
'os_progress': 0,
|
||||
'messages': 'Preparing for OS installation'}
|
||||
update_db_host_status(self.req, host_detail['id'], host_status)
|
||||
daisy_cmn.update_db_host_status(self.req, host_detail['id'],
|
||||
host_status)
|
||||
|
||||
for host_detail in hosts_detail:
|
||||
self._install_os_for_baremetal(host_detail)
|
||||
|
||||
def _set_disk_start_mode(self, host_detail):
|
||||
host_manufacturer = host_detail['system'].get('manufacturer')
|
||||
LOG.info(_("Set boot from disk for host %s" % (host_detail['id'])))
|
||||
self._set_boot_or_power_state(host_detail['ipmi_user'],
|
||||
host_detail['ipmi_passwd'],
|
||||
host_detail['ipmi_addr'],
|
||||
'disk')
|
||||
LOG.info(_("reboot host %s" % (host_detail['id'])))
|
||||
self._set_boot_or_power_state(host_detail['ipmi_user'],
|
||||
host_detail['ipmi_passwd'],
|
||||
host_detail['ipmi_addr'],
|
||||
'reset')
|
||||
if host_detail.get('hwm_id'):
|
||||
host_hwm_meta = {
|
||||
"hwm_ip": host_detail.get('hwm_ip'),
|
||||
"hwm_id": host_detail.get('hwm_id'),
|
||||
"boot_type": "disk"
|
||||
}
|
||||
self.providerclient(host_hwm_meta['hwm_ip']).node.set_boot(
|
||||
**host_hwm_meta)
|
||||
LOG.info(_("reboot host %s" % (host_detail['id'])))
|
||||
host_hwm_meta.pop('boot_type')
|
||||
self.providerclient(host_hwm_meta['hwm_ip']).node.restart(
|
||||
**host_hwm_meta)
|
||||
elif host_manufacturer == 'Cisco Systems Inc':
|
||||
set_disk_start_of_cisco(host_detail)
|
||||
set_reboot_of_cisco(host_detail)
|
||||
else:
|
||||
self._set_boot_or_power_state(host_detail, 'disk')
|
||||
LOG.info(_("reboot host %s" % (host_detail['id'])))
|
||||
self._set_boot_or_power_state(host_detail, 'reset')
|
||||
|
||||
def _init_progress(self, host_detail, hosts_status):
|
||||
host_id = host_detail['id']
|
||||
@ -514,7 +703,7 @@ class OSInstall():
|
||||
else:
|
||||
host_status['messages'] = "OS installing"
|
||||
|
||||
update_db_host_status(self.req, host_id, host_status)
|
||||
daisy_cmn.update_db_host_status(self.req, host_id, host_status)
|
||||
|
||||
def _query_host_progress(self, host_detail, host_status, host_last_status):
|
||||
host_id = host_detail['id']
|
||||
@ -575,11 +764,14 @@ class OSInstall():
|
||||
'messages'] = "docker container created timeout"
|
||||
else:
|
||||
host_status['messages'] = "os installed timeout"
|
||||
if daisy_cmn.in_cluster_list(self.cluster_id):
|
||||
LOG.info("os install clear install global variables")
|
||||
daisy_cmn.cluster_list_delete(self.cluster_id)
|
||||
if (host_status['os_progress'] !=
|
||||
host_last_status['os_progress'] or
|
||||
host_status['os_status'] != host_last_status['os_status']):
|
||||
host_status['count'] = 0
|
||||
update_db_host_status(self.req, host_id, host_status)
|
||||
daisy_cmn.update_db_host_status(self.req, host_id, host_status)
|
||||
return hosts_status
|
||||
|
||||
def _get_install_status(self, hosts_detail):
|
||||
@ -608,7 +800,11 @@ class OSInstall():
|
||||
'INSTALL_FAILED']
|
||||
host_status[
|
||||
'messages'] = "cluster os installed timeout"
|
||||
update_db_host_status(self.req, host_id, host_status)
|
||||
daisy_cmn.update_db_host_status(self.req, host_id,
|
||||
host_status)
|
||||
if daisy_cmn.in_cluster_list(self.cluster_id):
|
||||
LOG.info("os install clear install global variables")
|
||||
daisy_cmn.cluster_list_delete(self.cluster_id)
|
||||
break
|
||||
else:
|
||||
query_count += 1
|
||||
@ -617,6 +813,8 @@ class OSInstall():
|
||||
return hosts_install_status
|
||||
|
||||
def install_os(self, hosts_detail, role_hosts_ids):
|
||||
# 15 hosts ,install 10 firstly ,then 5 host
|
||||
# output :host_detail=5 ,role_hosts_ids is failed host among 10 hosts
|
||||
if len(hosts_detail) > self.max_parallel_os_num:
|
||||
install_hosts = hosts_detail[:self.max_parallel_os_num]
|
||||
hosts_detail = hosts_detail[self.max_parallel_os_num:]
|
||||
@ -628,12 +826,21 @@ class OSInstall():
|
||||
LOG.info(
|
||||
_("Begin install os for hosts %s." % ','.join(install_hosts_id)))
|
||||
daisy_cmn.os_install_start_time = time.time()
|
||||
for host_detail in install_hosts:
|
||||
host_manufacturer = host_detail['system'].get('manufacturer')
|
||||
if host_manufacturer == 'Cisco Systems Inc':
|
||||
host_detail['location'] = \
|
||||
get_host_location_of_cisco(host_detail)
|
||||
self._begin_install_os(install_hosts)
|
||||
install_hosts = _rm_ipmi_failed_host(self.req, install_hosts)
|
||||
LOG.info(_("Begin to query install progress..."))
|
||||
# wait to install completely
|
||||
cluster_install_status = self._get_install_status(install_hosts)
|
||||
total_time_cost = str(
|
||||
round((time.time() - daisy_cmn.os_install_start_time) / 60, 2))
|
||||
if daisy_cmn.in_cluster_list(self.cluster_id):
|
||||
daisy_cmn.cluster_list_delete(self.cluster_id)
|
||||
LOG.info("Clear install global variables")
|
||||
LOG.info(
|
||||
_("It totally takes %s min for all host to install os"
|
||||
% total_time_cost))
|
||||
@ -646,10 +853,7 @@ class OSInstall():
|
||||
_("%s %s %s" % (host_id, host_status['os_status'],
|
||||
host_status['messages'])))
|
||||
if host_id in role_hosts_ids:
|
||||
if host_status['os_status'] ==\
|
||||
host_os_status['INSTALL_FAILED']:
|
||||
break
|
||||
else:
|
||||
if host_status['os_status'] == host_os_status['ACTIVE']:
|
||||
role_hosts_ids.remove(host_id)
|
||||
return (hosts_detail, role_hosts_ids)
|
||||
|
||||
|
106
code/daisy/daisy/api/v1/backend_types.py
Executable file
106
code/daisy/daisy/api/v1/backend_types.py
Executable file
@ -0,0 +1,106 @@
|
||||
# Copyright 2013 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
import os
|
||||
import subprocess
|
||||
from oslo_log import log as logging
|
||||
from webob.exc import HTTPForbidden
|
||||
|
||||
from daisy import i18n
|
||||
from daisy import notifier
|
||||
|
||||
from daisy.api import policy
|
||||
from daisy.common import utils
|
||||
from daisy.common import wsgi
|
||||
import daisy.registry.client.v1.api as registry
|
||||
from daisy.api.v1 import controller
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
_ = i18n._
|
||||
_LE = i18n._LE
|
||||
_LI = i18n._LI
|
||||
_LW = i18n._LW
|
||||
|
||||
|
||||
class Controller(controller.BaseController):
|
||||
"""
|
||||
WSGI controller for hosts resource in Daisy v1 API
|
||||
|
||||
The hosts resource API is a RESTful web service for host data. The API
|
||||
is as follows::
|
||||
|
||||
GET /backend_types -- Returns a set of brief metadata
|
||||
about backend_types
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.notifier = notifier.Notifier()
|
||||
registry.configure_registry_client()
|
||||
self.policy = policy.Enforcer()
|
||||
|
||||
@utils.mutating
|
||||
def get(self, req):
|
||||
daisy_conf_path = "/home/daisy_install/daisy.conf"
|
||||
if os.path.exists(daisy_conf_path):
|
||||
scripts = "sed '/^[[:space:]]*#/d' " \
|
||||
"/home/daisy_install/daisy.conf | sed " \
|
||||
"/^[[:space:]]*$/d | grep " \
|
||||
"'default_backend_types[[:space:]]*=' | sed " \
|
||||
"'s/=/ /' | sed -e 's/^\w*\ *//'"
|
||||
try:
|
||||
default_backend_types = subprocess.check_output(
|
||||
scripts,
|
||||
shell=True,
|
||||
stderr=subprocess.STDOUT).strip()
|
||||
except:
|
||||
msg = 'Error occurred when running scripts ' \
|
||||
'to get default_backend_types'
|
||||
LOG.error(msg)
|
||||
raise HTTPForbidden(explanation=msg, request=req,
|
||||
content_type="text/plain")
|
||||
return {"default_backend_types": default_backend_types}
|
||||
|
||||
else:
|
||||
msg = "/home/daisy_intall/daisy.conf is not exist"
|
||||
LOG.error(msg)
|
||||
raise HTTPForbidden(explanation=msg, request=req,
|
||||
content_type="text/plain")
|
||||
|
||||
|
||||
class BackendTypesDeserializer(wsgi.JSONRequestDeserializer):
|
||||
"""Handles deserialization of specific controller method requests."""
|
||||
|
||||
def get(self, request):
|
||||
result = {}
|
||||
return result
|
||||
|
||||
|
||||
class BackendTypesSerializer(wsgi.JSONResponseSerializer):
|
||||
"""Handles serialization of specific controller method responses."""
|
||||
|
||||
def __init__(self):
|
||||
self.notifier = notifier.Notifier()
|
||||
|
||||
def get(self, response, result):
|
||||
response.status = 201
|
||||
response.headers['Content-Type'] = 'application/json'
|
||||
response.body = self.to_json(result)
|
||||
return response
|
||||
|
||||
|
||||
def create_resource():
|
||||
"""Version resource factory method"""
|
||||
deserializer = BackendTypesDeserializer()
|
||||
serializer = BackendTypesSerializer()
|
||||
return wsgi.Resource(Controller(), deserializer, serializer)
|
@ -25,6 +25,7 @@ from daisy.common import utils
|
||||
from daisy.common import wsgi
|
||||
from daisy import i18n
|
||||
import daisy.registry.client.v1.api as registry
|
||||
import daisy.api.backends.common as daisy_cmn
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
_ = i18n._
|
||||
@ -141,6 +142,10 @@ class Controller(controller.BaseController):
|
||||
|
||||
try:
|
||||
registry.delete_cluster_host(req.context, cluster_id, host_id)
|
||||
is_ssh_host = daisy_cmn._judge_ssh_host(req, host_id)
|
||||
if not is_ssh_host:
|
||||
host_data = {'os_status': 'init'}
|
||||
registry.update_host_metadata(req.context, host_id, host_data)
|
||||
except exception.NotFound as e:
|
||||
LOG.debug(utils.exception_to_str(e))
|
||||
raise webob.exc.HTTPNotFound(explanation=e.msg)
|
||||
|
@ -31,8 +31,14 @@ from daisy.api.v1 import networks
|
||||
from daisy.api.v1 import install
|
||||
from daisy.api.v1 import disk_array
|
||||
from daisy.api.v1 import host_template
|
||||
from daisy.api.v1 import backend_types
|
||||
from daisy.common import wsgi
|
||||
from daisy.api.v1 import backup_restore
|
||||
from daisy.api.v1 import versions
|
||||
from daisy.api.v1 import version_patchs
|
||||
from daisy.api.v1 import template_configs
|
||||
from daisy.api.v1 import template_funcs
|
||||
from daisy.api.v1 import template_services
|
||||
|
||||
|
||||
class API(wsgi.Router):
|
||||
@ -106,6 +112,11 @@ class API(wsgi.Router):
|
||||
action='update_pxe_host',
|
||||
conditions={'method': ['PUT']})
|
||||
|
||||
mapper.connect("/check",
|
||||
controller=hosts_resource,
|
||||
action='host_check',
|
||||
conditions={'method': ['POST']})
|
||||
|
||||
clusters_resource = clusters.create_resource()
|
||||
|
||||
mapper.connect("/clusters",
|
||||
@ -357,6 +368,10 @@ class API(wsgi.Router):
|
||||
action="cluster_config_set_progress",
|
||||
conditions={'method': ['POST']})
|
||||
|
||||
mapper.connect("/cluster_config_set_get",
|
||||
controller=config_sets_resource,
|
||||
action="cluster_config_set_get",
|
||||
conditions={'method': ['GET']})
|
||||
configs_resource = configs.create_resource()
|
||||
|
||||
mapper.connect("/configs",
|
||||
@ -444,11 +459,6 @@ class API(wsgi.Router):
|
||||
action='update_disk_array',
|
||||
conditions={'method': ['POST']})
|
||||
|
||||
# mapper.connect("/update/{cluster_id}/versions/{versions_id}",
|
||||
# controller=update_resource,
|
||||
# action='update_cluster_version',
|
||||
# conditions={'method': ['POST']})
|
||||
|
||||
array_resource = disk_array.create_resource()
|
||||
mapper.connect("/service_disk",
|
||||
controller=array_resource,
|
||||
@ -492,6 +502,23 @@ class API(wsgi.Router):
|
||||
action='cinder_volume_detail',
|
||||
conditions={'method': ['GET']})
|
||||
|
||||
mapper.connect("/optical_switch",
|
||||
controller=array_resource,
|
||||
action='optical_switch_add',
|
||||
conditions={'method': ['POST']})
|
||||
mapper.connect("/optical_switch/list",
|
||||
controller=array_resource,
|
||||
action='optical_switch_list',
|
||||
conditions={'method': ['GET']})
|
||||
mapper.connect("/optical_switch/{id}",
|
||||
controller=array_resource,
|
||||
action='optical_switch_update',
|
||||
conditions={'method': ['PUT']})
|
||||
mapper.connect("/optical_switch/{id}",
|
||||
controller=array_resource,
|
||||
action='optical_switch_delete',
|
||||
conditions={'method': ['DELETE']})
|
||||
|
||||
backup_restore_resource = backup_restore.create_resource()
|
||||
|
||||
mapper.connect("/backup",
|
||||
@ -511,6 +538,95 @@ class API(wsgi.Router):
|
||||
action='version',
|
||||
conditions={'method': ['POST']})
|
||||
|
||||
backend_types_resource = backend_types.create_resource()
|
||||
mapper.connect("/backend_types",
|
||||
controller=backend_types_resource,
|
||||
action='get',
|
||||
conditions={'method': ['POST']})
|
||||
|
||||
versions_resource = versions.create_resource()
|
||||
mapper.connect("/versions",
|
||||
controller=versions_resource,
|
||||
action='add_version',
|
||||
conditions={'method': ['POST']})
|
||||
mapper.connect("/versions/{id}",
|
||||
controller=versions_resource,
|
||||
action='delete_version',
|
||||
conditions={'method': ['DELETE']})
|
||||
mapper.connect("/versions",
|
||||
controller=versions_resource,
|
||||
action='list_version',
|
||||
conditions={'method': ['GET']})
|
||||
mapper.connect("/versions/{id}",
|
||||
controller=versions_resource,
|
||||
action='get_version',
|
||||
conditions={'method': ['GET']})
|
||||
mapper.connect("/versions/{id}",
|
||||
controller=versions_resource,
|
||||
action='update_version',
|
||||
conditions={'method': ['PUT']})
|
||||
|
||||
version_patchs_resource = version_patchs.create_resource()
|
||||
mapper.connect("/version_patchs",
|
||||
controller=version_patchs_resource,
|
||||
action='add_version_patch',
|
||||
conditions={'method': ['POST']})
|
||||
mapper.connect("/version_patchs/{id}",
|
||||
controller=version_patchs_resource,
|
||||
action='delete_version_patch',
|
||||
conditions={'method': ['DELETE']})
|
||||
mapper.connect("/version_patchs/{id}",
|
||||
controller=version_patchs_resource,
|
||||
action='get_version_patch',
|
||||
conditions={'method': ['GET']})
|
||||
mapper.connect("/version_patchs/{id}",
|
||||
controller=version_patchs_resource,
|
||||
action='update_version_patch',
|
||||
conditions={'method': ['PUT']})
|
||||
|
||||
template_configs_resource = template_configs.create_resource()
|
||||
mapper.connect("/template_configs/import_template_config",
|
||||
controller=template_configs_resource,
|
||||
action='import_template_config',
|
||||
conditions={'method': ['POST']})
|
||||
|
||||
mapper.connect("/template_configs/list",
|
||||
controller=template_configs_resource,
|
||||
action="list_template_config",
|
||||
conditions={'method': ['GET']})
|
||||
|
||||
mapper.connect("/template_configs/{id}",
|
||||
controller=template_configs_resource,
|
||||
action="get_template_config",
|
||||
conditions=dict(method=["GET"]))
|
||||
|
||||
template_funcs_resource = template_funcs.create_resource()
|
||||
mapper.connect("/template_funcs/import_template_func",
|
||||
controller=template_funcs_resource,
|
||||
action='import_template_func',
|
||||
conditions={'method': ['POST']})
|
||||
|
||||
mapper.connect("/template_funcs/list",
|
||||
controller=template_funcs_resource,
|
||||
action="list_template_func",
|
||||
conditions={'method': ['GET']})
|
||||
|
||||
mapper.connect("/template_funcs/{id}",
|
||||
controller=template_funcs_resource,
|
||||
action="get_template_func",
|
||||
conditions=dict(method=["GET"]))
|
||||
|
||||
template_services_resource = template_services.create_resource()
|
||||
mapper.connect("/template_services/list",
|
||||
controller=template_services_resource,
|
||||
action="list_template_service",
|
||||
conditions={'method': ['GET']})
|
||||
|
||||
mapper.connect("/template_services/{id}",
|
||||
controller=template_services_resource,
|
||||
action="get_template_service",
|
||||
conditions=dict(method=["GET"]))
|
||||
|
||||
path = os.path.join(os.path.abspath(os.path.dirname(
|
||||
os.path.realpath(__file__))),
|
||||
'ext')
|
||||
|
287
code/daisy/daisy/api/v1/template_configs.py
Normal file
287
code/daisy/daisy/api/v1/template_configs.py
Normal file
@ -0,0 +1,287 @@
|
||||
# Copyright 2013 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
/template_configs endpoint for Daisy v1 API
|
||||
"""
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
from webob.exc import HTTPBadRequest
|
||||
from webob.exc import HTTPConflict
|
||||
from webob.exc import HTTPForbidden
|
||||
from webob.exc import HTTPNotFound
|
||||
from webob import Response
|
||||
import copy
|
||||
import json
|
||||
|
||||
from daisy.api import policy
|
||||
import daisy.api.v1
|
||||
from daisy.api.v1 import controller
|
||||
from daisy.api.v1 import filters
|
||||
from daisy.common import exception
|
||||
from daisy.common import utils
|
||||
from daisy.common import wsgi
|
||||
from daisy import i18n
|
||||
from daisy import notifier
|
||||
import daisy.registry.client.v1.api as registry
|
||||
import daisy.api.backends.common as daisy_cmn
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
_ = i18n._
|
||||
_LE = i18n._LE
|
||||
_LI = i18n._LI
|
||||
_LW = i18n._LW
|
||||
SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS
|
||||
SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS
|
||||
ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format')
|
||||
CONF.import_opt('container_formats', 'daisy.common.config',
|
||||
group='image_format')
|
||||
CONF.import_opt('image_property_quota', 'daisy.common.config')
|
||||
CONFIG_ITEMS = ['name', 'config_file', 'service', 'section_name', 'data_type']
|
||||
|
||||
|
||||
def check_template_config_format(template):
|
||||
def check_service_format(services):
|
||||
"""
|
||||
"service": {
|
||||
"compute": {"force_type": "service"},
|
||||
"glance": {"force_type": "none"}
|
||||
}
|
||||
"""
|
||||
for service_name, service_value in services.items():
|
||||
if service_name not in daisy_cmn.service_map.keys():
|
||||
raise HTTPBadRequest("service '%s' not in service table" %
|
||||
service_name)
|
||||
if 'force_type' not in service_value \
|
||||
or service_value['force_type'] not in ['service', 'node',
|
||||
'none']:
|
||||
raise HTTPBadRequest("No force_type or error force_type value"
|
||||
" in service")
|
||||
|
||||
def check_data_type(config):
|
||||
if config['data_type'] not in ['int', 'string', 'list', 'boolean',
|
||||
'float', 'ipaddr', 'password']:
|
||||
raise HTTPBadRequest("data_type '%s' in '%s' not support" % (
|
||||
config['data_type'], config['name']))
|
||||
|
||||
if not template:
|
||||
raise HTTPBadRequest('Template config is null!')
|
||||
|
||||
for value in template.values():
|
||||
for item in CONFIG_ITEMS:
|
||||
if not value.get(item):
|
||||
raise HTTPBadRequest('No service or config file found in '
|
||||
'template config!')
|
||||
check_data_type(value)
|
||||
check_service_format(value['service'])
|
||||
|
||||
|
||||
class Controller(controller.BaseController):
|
||||
"""
|
||||
WSGI controller for template_configs resource in Daisy v1 API
|
||||
|
||||
The template_configs resource API is a RESTful web service for
|
||||
template_config data.
|
||||
The API is as follows::
|
||||
|
||||
GET /template_configs -- Returns a set of brief metadata about
|
||||
template_configs
|
||||
GET /template_configs/detail -- Returns a set of detailed metadata
|
||||
about emplate_configs
|
||||
HEAD /template_configs/<ID> --
|
||||
Return metadata about an template_config with id <ID>
|
||||
GET /template_configs/<ID> --
|
||||
Return template_config data for template_config with id <ID>
|
||||
POST /template_configs --
|
||||
Store template_config data and return metadata about the
|
||||
newly-stored template_config
|
||||
PUT /template_configs/<ID> --
|
||||
Update template_config metadata and/or upload template_config
|
||||
data for a previously-reserved template_config
|
||||
DELETE /template_configs/<ID> -- Delete the template_config with <ID>
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.notifier = notifier.Notifier()
|
||||
registry.configure_registry_client()
|
||||
self.policy = policy.Enforcer()
|
||||
|
||||
def _enforce(self, req, action, target=None):
|
||||
"""Authorize an action against our policies"""
|
||||
if target is None:
|
||||
target = {}
|
||||
try:
|
||||
self.policy.enforce(req.context, action, target)
|
||||
except exception.Forbidden:
|
||||
raise HTTPForbidden()
|
||||
|
||||
def _get_filters(self, req):
|
||||
"""
|
||||
Return a dictionary of query param filters from the request
|
||||
|
||||
:param req: the Request object coming from the wsgi layer
|
||||
:retval a dict of key/value filters
|
||||
"""
|
||||
query_filters = {}
|
||||
for param in req.params:
|
||||
if param in SUPPORTED_FILTERS:
|
||||
query_filters[param] = req.params.get(param)
|
||||
if not filters.validate(param, query_filters[param]):
|
||||
raise HTTPBadRequest(_('Bad value passed to filter '
|
||||
'%(filter)s got %(val)s')
|
||||
% {'filter': param,
|
||||
'val': query_filters[param]})
|
||||
return query_filters
|
||||
|
||||
def _get_query_params(self, req):
|
||||
"""
|
||||
Extracts necessary query params from request.
|
||||
|
||||
:param req: the WSGI Request object
|
||||
:retval dict of parameters that can be used by registry client
|
||||
"""
|
||||
params = {'filters': self._get_filters(req)}
|
||||
|
||||
for PARAM in SUPPORTED_PARAMS:
|
||||
if PARAM in req.params:
|
||||
params[PARAM] = req.params.get(PARAM)
|
||||
return params
|
||||
|
||||
def _raise_404_if_cluster_deleted(self, req, cluster_id):
|
||||
cluster = self.get_cluster_meta_or_404(req, cluster_id)
|
||||
if cluster['deleted']:
|
||||
msg = _("cluster with identifier %s has been deleted.") % \
|
||||
cluster_id
|
||||
raise HTTPNotFound(msg)
|
||||
|
||||
|
||||
@utils.mutating
|
||||
def get_template_config(self, req, id):
|
||||
"""
|
||||
Returns metadata about an template_config in the HTTP headers of the
|
||||
response object
|
||||
|
||||
:param req: The WSGI/Webob Request object
|
||||
:param id: The opaque template_config identifier
|
||||
|
||||
:raises HTTPNotFound if template_config metadata is not available to user
|
||||
"""
|
||||
self._enforce(req, 'get_template_config')
|
||||
template_config_meta = self.get_template_config_meta_or_404(req, id)
|
||||
return {'template_config_meta': template_config_meta}
|
||||
|
||||
def list_template_config(self, req):
|
||||
"""
|
||||
Returns detailed information for all available template_configs
|
||||
|
||||
:param req: The WSGI/Webob Request object
|
||||
:retval The response body is a mapping of the following form::
|
||||
|
||||
{'template_configs': [
|
||||
{'id': <ID>,
|
||||
'name': <NAME>,
|
||||
'description': <DESCRIPTION>,
|
||||
'created_at': <TIMESTAMP>,
|
||||
'updated_at': <TIMESTAMP>,
|
||||
'deleted_at': <TIMESTAMP>|<NONE>,}, ...
|
||||
]}
|
||||
"""
|
||||
self._enforce(req, 'list_template_config')
|
||||
params = self._get_query_params(req)
|
||||
try:
|
||||
template_configs = registry.list_template_config_metadata(
|
||||
req.context, **params)
|
||||
except exception.Invalid as e:
|
||||
raise HTTPBadRequest(explanation=e.msg, request=req)
|
||||
return dict(template_configs=template_configs)
|
||||
|
||||
@utils.mutating
|
||||
def import_template_config(self, req, template_config_meta):
|
||||
self._enforce(req, 'import_template_config')
|
||||
try:
|
||||
template = json.loads(template_config_meta.get('template', None))
|
||||
except ValueError as e:
|
||||
LOG.error(e.message)
|
||||
raise HTTPBadRequest(explanation=e.message, request=req)
|
||||
check_template_config_format(template)
|
||||
template_config_meta = registry.import_template_config_metadata(
|
||||
req.context, template_config_meta)
|
||||
return {'template_config_meta': template_config_meta}
|
||||
|
||||
|
||||
class TemplateConfigSetDeserializer(wsgi.JSONRequestDeserializer):
    """Handles deserialization of specific controller method requests."""

    def _deserialize(self, request):
        # Every mutating call carries its metadata under the same key.
        return {"template_config_meta": utils.get_dict_meta(request)}

    def add_template_config(self, request):
        return self._deserialize(request)

    def update_template_config(self, request):
        return self._deserialize(request)

    def import_template_config(self, request):
        return self._deserialize(request)
|
||||
|
||||
|
||||
class TemplateConfigSetSerializer(wsgi.JSONResponseSerializer):
    """Handles serialization of specific controller method responses."""

    def __init__(self):
        # Notifier kept for parity with the other serializers in this file.
        self.notifier = notifier.Notifier()

    def add_template_config(self, response, result):
        # Render the newly-created template_config as a JSON body.
        template_config_meta = result['template_config_meta']
        response.status = 201
        response.headers['Content-Type'] = 'application/json'
        response.body = self.to_json(
            dict(template_config=template_config_meta))
        return response

    def delete_template_config(self, response, result):
        # NOTE(review): 201 (Created) is unusual for a delete response;
        # presumably 200 was intended -- confirm before changing, since
        # existing clients may depend on it.
        template_config_meta = result['template_config_meta']
        response.status = 201
        response.headers['Content-Type'] = 'application/json'
        response.body = self.to_json(
            dict(template_config=template_config_meta))
        return response

    def get_template_config(self, response, result):
        # NOTE(review): 201 for a plain GET is likewise suspect -- verify.
        template_config_meta = result['template_config_meta']
        response.status = 201
        response.headers['Content-Type'] = 'application/json'
        response.body = self.to_json(
            dict(template_config=template_config_meta))
        return response

    def import_template_config(self, response, result):
        # The import result is already a mapping; serialize it as-is.
        response.status = 201
        response.headers['Content-Type'] = 'application/json'
        response.body = self.to_json(result)
        return response
|
||||
|
||||
|
||||
def create_resource():
    """template_configs resource factory method"""
    body_deserializer = TemplateConfigSetDeserializer()
    body_serializer = TemplateConfigSetSerializer()
    return wsgi.Resource(Controller(), body_deserializer, body_serializer)
|
261
code/daisy/daisy/api/v1/template_funcs.py
Normal file
261
code/daisy/daisy/api/v1/template_funcs.py
Normal file
@ -0,0 +1,261 @@
|
||||
# Copyright 2013 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
/template_funcs endpoint for Daisy v1 API
|
||||
"""
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
from webob.exc import HTTPBadRequest
|
||||
from webob.exc import HTTPConflict
|
||||
from webob.exc import HTTPForbidden
|
||||
from webob.exc import HTTPNotFound
|
||||
from webob import Response
|
||||
import copy
|
||||
import json
|
||||
|
||||
from daisy.api import policy
|
||||
import daisy.api.v1
|
||||
from daisy.api.v1 import controller
|
||||
from daisy.api.v1 import filters
|
||||
from daisy.common import exception
|
||||
from daisy.common import utils
|
||||
from daisy.common import wsgi
|
||||
from daisy import i18n
|
||||
from daisy import notifier
|
||||
import daisy.registry.client.v1.api as registry
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
_ = i18n._
|
||||
_LE = i18n._LE
|
||||
_LI = i18n._LI
|
||||
_LW = i18n._LW
|
||||
SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS
|
||||
SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS
|
||||
ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format')
|
||||
CONF.import_opt('container_formats', 'daisy.common.config',
|
||||
group='image_format')
|
||||
CONF.import_opt('image_property_quota', 'daisy.common.config')
|
||||
FUNC_ITEMS = ['name', 'config']
|
||||
|
||||
|
||||
def check_template_func_format(template):
|
||||
if not template:
|
||||
raise HTTPBadRequest('Template function is null!')
|
||||
for value in template.values():
|
||||
for item in FUNC_ITEMS:
|
||||
if not value.get(item):
|
||||
raise HTTPBadRequest('No configs found in template function!')
|
||||
if not isinstance(value['config'], dict):
|
||||
raise HTTPBadRequest('Config in template function should be dict '
|
||||
'type')
|
||||
|
||||
|
||||
class Controller(controller.BaseController):
    """
    WSGI controller for template_funcs resource in Daisy v1 API

    The template_funcs resource API is a RESTful web service for
    template_func data. The API is as follows::

        GET /template_funcs -- Returns a set of brief metadata about
                               template_funcs
        GET /template_funcs/detail -- Returns a set of detailed metadata
                                      about template_funcs
        HEAD /template_funcs/<ID> --
            Return metadata about an template_func with id <ID>
        GET /template_funcs/<ID> --
            Return template_func data for template_func with id <ID>
        POST /template_funcs --
            Store template_func data and return metadata about the
            newly-stored template_func
        PUT /template_funcs/<ID> --
            Update template_func metadata and/or upload template_func
            data for a previously-reserved template_func
        DELETE /template_funcs/<ID> -- Delete the template_funcs with id <ID>
    """

    def __init__(self):
        # Notifier for API events; registry client must be configured
        # before handlers issue registry.* calls.
        self.notifier = notifier.Notifier()
        registry.configure_registry_client()
        self.policy = policy.Enforcer()

    def _enforce(self, req, action, target=None):
        """Authorize an action against our policies"""
        if target is None:
            target = {}
        try:
            self.policy.enforce(req.context, action, target)
        except exception.Forbidden:
            raise HTTPForbidden()

    def _get_filters(self, req):
        """
        Return a dictionary of query param filters from the request

        :param req: the Request object coming from the wsgi layer
        :retval a dict of key/value filters
        """
        query_filters = {}
        for param in req.params:
            if param in SUPPORTED_FILTERS:
                query_filters[param] = req.params.get(param)
                if not filters.validate(param, query_filters[param]):
                    raise HTTPBadRequest(_('Bad value passed to filter '
                                           '%(filter)s got %(val)s')
                                         % {'filter': param,
                                            'val': query_filters[param]})
        return query_filters

    def _get_query_params(self, req):
        """
        Extracts necessary query params from request.

        :param req: the WSGI Request object
        :retval dict of parameters that can be used by registry client
        """
        params = {'filters': self._get_filters(req)}

        for PARAM in SUPPORTED_PARAMS:
            if PARAM in req.params:
                params[PARAM] = req.params.get(PARAM)
        return params

    @utils.mutating
    def get_template_func(self, req, id, template_func_meta):
        """
        Returns metadata about an template_func in the HTTP headers of the
        response object

        :param req: The WSGI/Webob Request object
        :param id: The opaque template_func identifier
        :param template_func_meta: optional mapping; a 'cluster_id' entry
            narrows the lookup to that cluster

        :raises HTTPNotFound: if template_func metadata is not available
        """
        self._enforce(req, 'get_template_func')
        params = {'filters': {}}
        if template_func_meta.get('cluster_id'):
            params['filters'].update({'cluster_id':
                                      template_func_meta['cluster_id']})
        template_func_meta = self.get_template_func_meta_or_404(req, id,
                                                                **params)
        return {'template_func_meta': template_func_meta}

    def list_template_func(self, req):
        """
        Returns detailed information for all available template_funcs

        :param req: The WSGI/Webob Request object
        :retval The response body is a mapping of the following form::

            {'template_funcs': [
                {'id': <ID>,
                 'func_name': <FUNC_NAME>,
                 'cn_desc': <CH_DESC>,
                 'en_desc': <EN_DESC>,
                 'data_check_script': <DATA_CHECK_SCRIPT>,
                 'created_at': <TIMESTAMP>,
                 'updated_at': <TIMESTAMP>,
                 'deleted_at': <TIMESTAMP>|<NONE>,}, ...
            ]}
        """
        self._enforce(req, 'list_template_func')
        params = self._get_query_params(req)
        try:
            template_funcs = registry.list_template_func_metadata(
                req.context, **params)
        except exception.Invalid as e:
            raise HTTPBadRequest(explanation=e.msg, request=req)
        return dict(template_funcs=template_funcs)

    @utils.mutating
    def import_template_func(self, req, template_func_meta):
        """Import a JSON-encoded template function into the registry.

        :raises HTTPBadRequest: if 'template' is missing or not valid JSON
        """
        self._enforce(req, 'import_template_func')
        template_str = template_func_meta.get('template')
        # Bug fix: json.loads(None) raises TypeError (not ValueError), so a
        # request without a 'template' field previously surfaced as a 500.
        if not template_str:
            msg = _("template field is required")
            raise HTTPBadRequest(explanation=msg, request=req)
        try:
            template = json.loads(template_str)
        except ValueError as e:
            LOG.error(e.message)
            raise HTTPBadRequest(explanation=e.message, request=req)
        check_template_func_format(template)
        template_func_meta = registry.import_template_func_metadata(
            req.context, template_func_meta)
        return {'template_func_meta': template_func_meta}
|
||||
|
||||
|
||||
class TemplateFuncSetDeserializer(wsgi.JSONRequestDeserializer):
    """Handles deserialization of specific controller method requests."""

    def _deserialize(self, request):
        # Every controller method receives its metadata under the same key.
        return {"template_func_meta": utils.get_dict_meta(request)}

    def add_template_func(self, request):
        return self._deserialize(request)

    def update_template_func(self, request):
        return self._deserialize(request)

    def get_template_func(self, request):
        return self._deserialize(request)

    def import_template_func(self, request):
        return self._deserialize(request)
|
||||
|
||||
|
||||
class TemplateFuncSetSerializer(wsgi.JSONResponseSerializer):
    """Handles serialization of specific controller method responses."""

    def __init__(self):
        # Notifier kept for parity with the other serializers in this file.
        self.notifier = notifier.Notifier()

    def add_template_func(self, response, result):
        # Render the newly-created template_func as a JSON body.
        template_func_meta = result['template_func_meta']
        response.status = 201
        response.headers['Content-Type'] = 'application/json'
        response.body = self.to_json(dict(template_func=template_func_meta))
        return response

    def delete_template_func(self, response, result):
        # NOTE(review): 201 (Created) is unusual for a delete response;
        # presumably 200 was intended -- confirm before changing.
        template_func_meta = result['template_func_meta']
        response.status = 201
        response.headers['Content-Type'] = 'application/json'
        response.body = self.to_json(dict(template_func=template_func_meta))
        return response

    def get_template_func(self, response, result):
        # NOTE(review): 201 for a plain GET is likewise suspect -- verify.
        template_func_meta = result['template_func_meta']
        response.status = 201
        response.headers['Content-Type'] = 'application/json'
        response.body = self.to_json(dict(template_func=template_func_meta))
        return response

    def import_template_func(self, response, result):
        # The import result is already a mapping; serialize it as-is.
        response.status = 201
        response.headers['Content-Type'] = 'application/json'
        response.body = self.to_json(result)
        return response
|
||||
|
||||
|
||||
def create_resource():
    """template_funcs resource factory method"""
    body_deserializer = TemplateFuncSetDeserializer()
    body_serializer = TemplateFuncSetSerializer()
    return wsgi.Resource(Controller(), body_deserializer, body_serializer)
|
229
code/daisy/daisy/api/v1/template_services.py
Normal file
229
code/daisy/daisy/api/v1/template_services.py
Normal file
@ -0,0 +1,229 @@
|
||||
# Copyright 2013 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
/template_services endpoint for Daisy v1 API
|
||||
"""
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
from webob.exc import HTTPBadRequest
|
||||
from webob.exc import HTTPConflict
|
||||
from webob.exc import HTTPForbidden
|
||||
from webob.exc import HTTPNotFound
|
||||
from webob import Response
|
||||
import copy
|
||||
import json
|
||||
|
||||
from daisy.api import policy
|
||||
import daisy.api.v1
|
||||
from daisy.api.v1 import controller
|
||||
from daisy.api.v1 import filters
|
||||
from daisy.common import exception
|
||||
from daisy.common import utils
|
||||
from daisy.common import wsgi
|
||||
from daisy import i18n
|
||||
from daisy import notifier
|
||||
import daisy.registry.client.v1.api as registry
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
_ = i18n._
|
||||
_LE = i18n._LE
|
||||
_LI = i18n._LI
|
||||
_LW = i18n._LW
|
||||
SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS
|
||||
SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS
|
||||
ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format')
|
||||
CONF.import_opt('container_formats', 'daisy.common.config',
|
||||
group='image_format')
|
||||
CONF.import_opt('image_property_quota', 'daisy.common.config')
|
||||
|
||||
|
||||
class Controller(controller.BaseController):
    """
    WSGI controller for template_services resource in Daisy v1 API.

    Exposes RESTful access to template_service data::

        GET /template_services -- Returns a set of brief metadata about
                                  template_services
        GET /template_services/detail -- Returns a set of detailed metadata
                                         about template_services
        HEAD /template_services/<ID> --
            Return metadata about an template_service with id <ID>
        GET /template_services/<ID> --
            Return template_service data for template_service with id <ID>
        POST /template_services --
            Store template_service data and return metadata about the
            newly-stored template_service
        PUT /template_services/<ID> --
            Update template_service metadata and/or upload template_service
            data for a previously-reserved template_service
        DELETE /template_services/<ID> -- Delete the template_service with
                                          id <ID>
    """

    def __init__(self):
        self.notifier = notifier.Notifier()
        registry.configure_registry_client()
        self.policy = policy.Enforcer()

    def _enforce(self, req, action, target=None):
        """Authorize an action against our policies."""
        target = {} if target is None else target
        try:
            self.policy.enforce(req.context, action, target)
        except exception.Forbidden:
            raise HTTPForbidden()

    def _get_filters(self, req):
        """Collect the supported filter values present on the request.

        :param req: the Request object coming from the wsgi layer
        :retval a dict of key/value filters
        """
        query_filters = {}
        for key in req.params:
            if key not in SUPPORTED_FILTERS:
                continue
            value = req.params.get(key)
            query_filters[key] = value
            if not filters.validate(key, value):
                raise HTTPBadRequest(_('Bad value passed to filter '
                                       '%(filter)s got %(val)s')
                                     % {'filter': key, 'val': value})
        return query_filters

    def _get_query_params(self, req):
        """Extract registry-client parameters from the request.

        :param req: the WSGI Request object
        :retval dict of parameters that can be used by registry client
        """
        params = {'filters': self._get_filters(req)}
        for name in SUPPORTED_PARAMS:
            if name in req.params:
                params[name] = req.params.get(name)
        return params

    @utils.mutating
    def get_template_service(self, req, id):
        """Return metadata for one template_service.

        :param req: The WSGI/Webob Request object
        :param id: The opaque template_service identifier

        :raises HTTPNotFound: if the template_service is not available
        """
        self._enforce(req, 'get_template_service')
        meta = self.get_template_service_meta_or_404(req, id)
        return {'template_service_meta': meta}

    def list_template_service(self, req):
        """Return detailed metadata for all available template_services.

        :param req: The WSGI/Webob Request object
        :retval mapping of the form::

            {'template_services': [
                {'id': <ID>,
                 'service_name': <SERVICE_NAME>,
                 'force_type': <FORCE_TYPE>,
                 'created_at': <TIMESTAMP>,
                 'updated_at': <TIMESTAMP>,
                 'deleted_at': <TIMESTAMP>|<NONE>,}, ...
            ]}
        """
        self._enforce(req, 'list_template_service')
        query = self._get_query_params(req)
        try:
            services = registry.list_template_service_metadata(req.context,
                                                               **query)
        except exception.Invalid as e:
            raise HTTPBadRequest(explanation=e.msg, request=req)
        return {'template_services': services}
|
||||
|
||||
|
||||
class TemplateServiceSetDeserializer(wsgi.JSONRequestDeserializer):
    """Handles deserialization of specific controller method requests."""

    def _deserialize(self, request):
        # Every controller method receives its metadata under the same key.
        return {"template_service_meta": utils.get_dict_meta(request)}

    def add_template_service(self, request):
        return self._deserialize(request)

    def update_template_service(self, request):
        return self._deserialize(request)

    def import_template_service(self, request):
        return self._deserialize(request)
|
||||
|
||||
|
||||
class TemplateServiceSetSerializer(wsgi.JSONResponseSerializer):
    """Handles serialization of specific controller method responses."""

    def __init__(self):
        # Notifier kept for parity with the other serializers in this file.
        self.notifier = notifier.Notifier()

    def add_template_service(self, response, result):
        # Render the newly-created template_service as a JSON body.
        template_service_meta = result['template_service_meta']
        response.status = 201
        response.headers['Content-Type'] = 'application/json'
        response.body = self.to_json(
            dict(template_service=template_service_meta))
        return response

    def delete_template_service(self, response, result):
        # NOTE(review): 201 (Created) is unusual for a delete response;
        # presumably 200 was intended -- confirm before changing.
        template_service_meta = result['template_service_meta']
        response.status = 201
        response.headers['Content-Type'] = 'application/json'
        response.body = self.to_json(
            dict(template_service=template_service_meta))
        return response

    def get_template_service(self, response, result):
        # NOTE(review): 201 for a plain GET is likewise suspect -- verify.
        template_service_meta = result['template_service_meta']
        response.status = 201
        response.headers['Content-Type'] = 'application/json'
        response.body = self.to_json(
            dict(template_service=template_service_meta))
        return response

    def import_template_service(self, response, result):
        # The import result is already a mapping; serialize it as-is.
        response.status = 201
        response.headers['Content-Type'] = 'application/json'
        response.body = self.to_json(result)
        return response
|
||||
|
||||
|
||||
def create_resource():
    """template_services resource factory method"""
    body_deserializer = TemplateServiceSetDeserializer()
    body_serializer = TemplateServiceSetSerializer()
    return wsgi.Resource(Controller(), body_deserializer, body_serializer)
|
303
code/daisy/daisy/api/v1/version_patchs.py
Normal file
303
code/daisy/daisy/api/v1/version_patchs.py
Normal file
@ -0,0 +1,303 @@
|
||||
# Copyright 2013 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
/versions endpoint for Daisy v1 API
|
||||
"""
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
from webob.exc import HTTPBadRequest
|
||||
from webob.exc import HTTPConflict
|
||||
from webob.exc import HTTPForbidden
|
||||
from webob.exc import HTTPNotFound
|
||||
from webob import Response
|
||||
|
||||
from daisy.api import policy
|
||||
import daisy.api.v1
|
||||
from daisy.api.v1 import controller
|
||||
from daisy.api.v1 import filters
|
||||
from daisy.common import exception
|
||||
from daisy.common import utils
|
||||
from daisy.common import wsgi
|
||||
from daisy import i18n
|
||||
from daisy import notifier
|
||||
import daisy.registry.client.v1.api as registry
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
_ = i18n._
|
||||
_LE = i18n._LE
|
||||
_LI = i18n._LI
|
||||
_LW = i18n._LW
|
||||
SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS
|
||||
SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS
|
||||
ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE
|
||||
SUPPORTED_DEPLOYMENT_BACKENDS = ('tecs', 'zenic', 'proton')
|
||||
SUPPORT_DISK_LOCATION = ('local', 'share')
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format')
|
||||
CONF.import_opt('container_formats', 'daisy.common.config',
|
||||
group='image_format')
|
||||
CONF.import_opt('image_property_quota', 'daisy.common.config')
|
||||
|
||||
|
||||
class Controller(controller.BaseController):
    """
    WSGI controller for version patches resource in Daisy v1 API

    The version patches resource API is a RESTful web service for version
    patch data. The API is as follows::

        GET /versions -- Returns a set of brief metadata about versions
        GET /versions/detail -- Returns a set of detailed metadata about
                                versions
        HEAD /versions/<ID> -- Return metadata about a version patch <ID>
        GET /versions/<ID> -- Return data for the version patch with <ID>
        POST /versions -- Store version patch data and return metadata
                          about the newly-stored version patch
        PUT /versions/<ID> -- Update version patch metadata and/or upload
                              data for a previously-reserved version patch
        DELETE /versions/<ID> -- Delete the version patch with id <ID>
    """

    def __init__(self):
        # Notifier for API events; registry client must be configured
        # before handlers issue registry.* calls.
        self.notifier = notifier.Notifier()
        registry.configure_registry_client()
        self.policy = policy.Enforcer()

    def _enforce(self, req, action, target=None):
        """Authorize an action against our policies"""
        if target is None:
            target = {}
        try:
            self.policy.enforce(req.context, action, target)
        except exception.Forbidden:
            raise HTTPForbidden()

    def _get_filters(self, req):
        """
        Return a dictionary of query param filters from the request

        :param req: the Request object coming from the wsgi layer
        :retval a dict of key/value filters
        """
        query_filters = {}
        for param in req.params:
            if param in SUPPORTED_FILTERS:
                query_filters[param] = req.params.get(param)
                if not filters.validate(param, query_filters[param]):
                    raise HTTPBadRequest(_('Bad value passed to filter '
                                           '%(filter)s got %(val)s')
                                         % {'filter': param,
                                            'val': query_filters[param]})
        return query_filters

    def _get_query_params(self, req):
        """
        Extracts necessary query params from request.

        :param req: the WSGI Request object
        :retval dict of parameters that can be used by registry client
        """
        params = {'filters': self._get_filters(req)}

        for PARAM in SUPPORTED_PARAMS:
            if PARAM in req.params:
                params[PARAM] = req.params.get(PARAM)
        return params

    @utils.mutating
    def add_version_patch(self, req, version_patch_meta):
        """
        Adds a new version patch to Daisy.

        :param req: The WSGI/Webob Request object
        :param version_patch_meta: Mapping of metadata about version patch

        :raises HTTPBadRequest: if the version patch name is missing
        """
        self._enforce(req, 'add_version_patch')
        version_patch_name = version_patch_meta.get('name')
        if not version_patch_meta.get('status', None):
            version_patch_meta['status'] = "unused"
        if not version_patch_name:
            # Bug fix: a bare ValueError escaped the WSGI layer as a 500;
            # a missing name is a client error, so report 400 instead.
            msg = _("version patch name is null!")
            raise HTTPBadRequest(explanation=msg,
                                 request=req,
                                 content_type="text/plain")
        version_patch_meta = \
            registry.add_version_patch_metadata(req.context,
                                                version_patch_meta)

        return {'version_patch_meta': version_patch_meta}

    @utils.mutating
    def delete_version_patch(self, req, id):
        """
        Deletes a version patch from Daisy.

        :param req: The WSGI/Webob Request object
        :param id: The opaque version patch identifier

        :raises HTTPNotFound: if the version patch does not exist
        :raises HTTPForbidden: if deletion is not permitted
        :raises HTTPConflict: if the version patch is in use
        """
        self._enforce(req, 'delete_version_patch')
        self.get_version_patch_meta_or_404(req, id)
        # Bug fix: replaced a Python 2 'print' debug statement with the
        # project logger.
        LOG.debug("delete_version_patch:%s" % id)
        try:
            registry.delete_version_patch_metadata(req.context, id)
        except exception.NotFound as e:
            msg = (_("Failed to find version patch to delete: %s") %
                   utils.exception_to_str(e))
            LOG.warn(msg)
            raise HTTPNotFound(explanation=msg,
                               request=req,
                               content_type="text/plain")
        except exception.Forbidden as e:
            msg = (_("Forbidden to delete version patch: %s") %
                   utils.exception_to_str(e))
            LOG.warn(msg)
            raise HTTPForbidden(explanation=msg,
                                request=req,
                                content_type="text/plain")
        except exception.InUseByStore as e:
            msg = (_("version patch %(id)s could not be deleted "
                     "because it is in use: " "%(exc)s")
                   % {"id": id, "exc": utils.exception_to_str(e)})
            LOG.warn(msg)
            raise HTTPConflict(explanation=msg,
                               request=req,
                               content_type="text/plain")
        else:
            return Response(body='', status=200)

    @utils.mutating
    def get_version_patch(self, req, id):
        """
        Returns metadata about an version patch in the HTTP headers of the
        response object

        :param req: The WSGI/Webob Request object
        :param id: The opaque version patch identifier

        :raises HTTPNotFound: if version patch metadata is not available
        """
        self._enforce(req, 'get_version_patch')
        version_patch_meta = self.get_version_patch_meta_or_404(req, id)
        return {'version_patch_meta': version_patch_meta}

    @utils.mutating
    def update_version_patch(self, req, id, version_patch_meta):
        """
        Updates an existing version patch with the registry.

        :param req: The WSGI/Webob Request object
        :param id: The opaque version patch identifier
        :param version_patch_meta: Mapping of metadata updates to apply

        :retval Returns the updated version patch information as a mapping
        """
        self._enforce(req, 'update_version_patch')
        orig_version_meta = self.get_version_patch_meta_or_404(req, id)

        if orig_version_meta['deleted']:
            msg = _("Forbidden to update deleted version patch.")
            raise HTTPForbidden(explanation=msg,
                                request=req,
                                content_type="text/plain")
        try:
            version_patch_meta = registry.update_version_patch_metadata(
                req.context,
                id,
                version_patch_meta)

        except exception.Invalid as e:
            msg = (_("Failed to update version patch. Got error: %s")
                   % utils.exception_to_str(e))
            LOG.warn(msg)
            raise HTTPBadRequest(explanation=msg,
                                 request=req,
                                 content_type="text/plain")
        except exception.NotFound as e:
            msg = (_("Failed to find version patch to update: %s") %
                   utils.exception_to_str(e))
            LOG.warn(msg)
            raise HTTPNotFound(explanation=msg,
                               request=req,
                               content_type="text/plain")
        except exception.Forbidden as e:
            msg = (_("Forbidden to update version patch: %s") %
                   utils.exception_to_str(e))
            LOG.warn(msg)
            raise HTTPForbidden(explanation=msg,
                                request=req,
                                content_type="text/plain")
        except (exception.Conflict, exception.Duplicate) as e:
            LOG.warn(utils.exception_to_str(e))
            # Bug fix: message previously said 'Host operation conflicts',
            # a copy-paste from the hosts controller.
            raise HTTPConflict(body=_('Version patch operation conflicts'),
                               request=req,
                               content_type='text/plain')
        else:
            self.notifier.info('version_patchs.update', version_patch_meta)

        return {'version_patch_meta': version_patch_meta}
|
||||
|
||||
|
||||
class VersionPatchDeserializer(wsgi.JSONRequestDeserializer):
    """Handles deserialization of specific controller method requests."""

    def _deserialize(self, request):
        """Extract the version patch metadata mapping from *request*."""
        return {"version_patch_meta": utils.get_dict_meta(request)}

    def add_version_patch(self, request):
        """Deserialize an add-version-patch request."""
        return self._deserialize(request)

    def update_version_patch(self, request):
        """Deserialize an update-version-patch request."""
        return self._deserialize(request)
|
||||
|
||||
|
||||
class VersionPatchserializer(wsgi.JSONResponseSerializer):
    """Handles serialization of specific controller method responses."""

    def __init__(self):
        self.notifier = notifier.Notifier()

    def _render(self, response, result):
        """Render the version patch metadata in *result* as JSON.

        Shared by add/delete/get, whose serialization was previously
        triplicated verbatim.
        NOTE(review): status 201 is kept for delete/get to preserve the
        original (non-standard) behavior -- confirm before changing.
        """
        version_patch_meta = result['version_patch_meta']
        response.status = 201
        response.headers['Content-Type'] = 'application/json'
        response.body = self.to_json(dict(version_patch=version_patch_meta))
        return response

    def add_version_patch(self, response, result):
        """Serialize a newly created version patch."""
        return self._render(response, result)

    def delete_version_patch(self, response, result):
        """Serialize the metadata of a deleted version patch."""
        return self._render(response, result)

    def get_version_patch(self, response, result):
        """Serialize a fetched version patch."""
        return self._render(response, result)
|
||||
|
||||
|
||||
def create_resource():
    """versions resource factory method"""
    return wsgi.Resource(Controller(),
                         VersionPatchDeserializer(),
                         VersionPatchserializer())
|
327
code/daisy/daisy/api/v1/versions.py
Normal file
327
code/daisy/daisy/api/v1/versions.py
Normal file
@ -0,0 +1,327 @@
|
||||
# Copyright 2013 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
/versions endpoint for Daisy v1 API
|
||||
"""
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
from webob.exc import HTTPBadRequest
|
||||
from webob.exc import HTTPConflict
|
||||
from webob.exc import HTTPForbidden
|
||||
from webob.exc import HTTPNotFound
|
||||
from webob import Response
|
||||
|
||||
from daisy.api import policy
|
||||
import daisy.api.v1
|
||||
from daisy.api.v1 import controller
|
||||
from daisy.api.v1 import filters
|
||||
from daisy.common import exception
|
||||
from daisy.common import utils
|
||||
from daisy.common import wsgi
|
||||
from daisy import i18n
|
||||
from daisy import notifier
|
||||
import daisy.registry.client.v1.api as registry
|
||||
|
||||
# Module-level logger and i18n translation shortcuts.
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
# Query parameters/filters shared across the v1 API endpoints.
SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS
SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS
ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE

CONF = cfg.CONF
# NOTE(review): these image-format options appear copied from the images
# controller; nothing in this module visibly reads them -- confirm they
# are needed here.
CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format')
CONF.import_opt('container_formats', 'daisy.common.config',
                group='image_format')
CONF.import_opt('image_property_quota', 'daisy.common.config')
|
||||
|
||||
|
||||
class Controller(controller.BaseController):
    """
    WSGI controller for versions resource in Daisy v1 API

    The versions resource API is a RESTful web service for version data.
    The API is as follows::

        GET /versions -- Returns a set of brief metadata about versions
        GET /versions/detail -- Returns a set of detailed metadata about
                                versions
        HEAD /versions/<ID> -- Return metadata about a version with id <ID>
        GET /versions/<ID> -- Return data for version with id <ID>
        POST /versions -- Store version data and return metadata about the
                          newly-stored version
        PUT /versions/<ID> -- Update version metadata and/or upload version
                              data for a previously-reserved version
        DELETE /versions/<ID> -- Delete the version with id <ID>
    """

    def __init__(self):
        self.notifier = notifier.Notifier()
        registry.configure_registry_client()
        self.policy = policy.Enforcer()

    def _enforce(self, req, action, target=None):
        """Authorize an action against our policies"""
        if target is None:
            target = {}
        try:
            self.policy.enforce(req.context, action, target)
        except exception.Forbidden:
            raise HTTPForbidden()

    def _get_filters(self, req):
        """
        Return a dictionary of query param filters from the request

        :param req: the Request object coming from the wsgi layer
        :retval a dict of key/value filters
        """
        query_filters = {}
        for param in req.params:
            if param in SUPPORTED_FILTERS:
                query_filters[param] = req.params.get(param)
                if not filters.validate(param, query_filters[param]):
                    raise HTTPBadRequest(_('Bad value passed to filter '
                                           '%(filter)s got %(val)s')
                                         % {'filter': param,
                                            'val': query_filters[param]})
        return query_filters

    def _get_query_params(self, req):
        """
        Extracts necessary query params from request.

        :param req: the WSGI Request object
        :retval dict of parameters that can be used by registry client
        """
        params = {'filters': self._get_filters(req)}

        for PARAM in SUPPORTED_PARAMS:
            if PARAM in req.params:
                params[PARAM] = req.params.get(PARAM)
        return params

    @utils.mutating
    def add_version(self, req, version_meta):
        """
        Adds a new version to Daisy.

        :param req: The WSGI/Webob Request object
        :param version_meta: Mapping of metadata about the version

        :retval mapping with the registered version metadata
        :raises ValueError: if the version name or type is missing
        """
        self._enforce(req, 'add_version')
        version_name = version_meta.get('name')
        version_type = version_meta.get('type')
        # Default a missing/empty status so the registry always gets one.
        if not version_meta.get('status', None):
            version_meta['status'] = "unused"
        if not version_name:
            raise ValueError('version name is null!')
        if not version_type:
            raise ValueError('version type is null!')
        version_meta = registry.add_version_metadata(req.context, version_meta)

        return {'version_meta': version_meta}

    @utils.mutating
    def delete_version(self, req, id):
        """
        Deletes a version from Daisy.

        :param req: The WSGI/Webob Request object
        :param id: The opaque version identifier

        :retval empty 200 Response on success
        :raises HTTPNotFound: if the version does not exist
        :raises HTTPForbidden: if deletion is not permitted
        :raises HTTPConflict: if the version is still in use
        """
        self._enforce(req, 'delete_version')

        self.get_version_meta_or_404(req, id)
        # Was a bare Python-2 ``print`` statement (stdout noise, and a
        # syntax error under Python 3); route it through the logger.
        LOG.debug("delete_version:%s", id)
        try:
            registry.delete_version_metadata(req.context, id)
        except exception.NotFound as e:
            msg = (_("Failed to find version to delete: %s") %
                   utils.exception_to_str(e))
            LOG.warning(msg)
            raise HTTPNotFound(explanation=msg,
                               request=req,
                               content_type="text/plain")
        except exception.Forbidden as e:
            # Message previously said "role" -- copy-paste from the roles
            # controller; this endpoint deletes versions.
            msg = (_("Forbidden to delete version: %s") %
                   utils.exception_to_str(e))
            LOG.warning(msg)
            raise HTTPForbidden(explanation=msg,
                                request=req,
                                content_type="text/plain")
        except exception.InUseByStore as e:
            msg = (
                _("version %(id)s could not be deleted because "
                  "it is in use: " "%(exc)s")
                % {"id": id, "exc": utils.exception_to_str(e)})
            LOG.warning(msg)
            raise HTTPConflict(explanation=msg,
                               request=req,
                               content_type="text/plain")
        else:
            # self.notifier.info('role.delete', role)
            return Response(body='', status=200)

    @utils.mutating
    def get_version(self, req, id):
        """
        Returns metadata about a version in the HTTP headers of the
        response object

        :param req: The WSGI/Webob Request object
        :param id: The opaque version identifier

        :retval mapping with the version metadata
        :raises HTTPNotFound: if version metadata is not available to user
        """
        self._enforce(req, 'get_version')
        version_meta = self.get_version_meta_or_404(req, id)
        return {'version_meta': version_meta}

    def list_version(self, req):
        """
        Returns list version information for all available versions

        :param req: The WSGI/Webob Request object
        :retval The response body is a mapping of the following form::

            {'versions': [
                {'id': <ID>,
                 'name': <NAME>,
                 'description': <DESCRIPTION>,
                 'created_at': <TIMESTAMP>,
                 'updated_at': <TIMESTAMP>,
                 'deleted_at': <TIMESTAMP>|<NONE>,}, ...
            ]}
        """
        self._enforce(req, 'list_version')
        params = self._get_query_params(req)
        try:
            versions = registry.list_version_metadata(req.context, **params)
        except exception.Invalid as e:
            raise HTTPBadRequest(explanation=e.msg, request=req)
        return dict(versions=versions)

    @utils.mutating
    def update_version(self, req, id, version_meta):
        """
        Updates an existing version with the registry.

        :param req: The WSGI/Webob Request object
        :param id: The opaque version identifier
        :param version_meta: Mapping of new metadata for the version

        :retval Returns the updated version information as a mapping
        :raises HTTPForbidden: if the version is deleted or update denied
        :raises HTTPBadRequest: if the supplied metadata is invalid
        :raises HTTPNotFound: if no version with ``id`` exists
        :raises HTTPConflict: on a conflicting concurrent operation
        """
        self._enforce(req, 'update_version')
        orig_version_meta = self.get_version_meta_or_404(req, id)

        # A soft-deleted version must not be resurrected through update.
        if orig_version_meta['deleted']:
            msg = _("Forbidden to update deleted version.")
            raise HTTPForbidden(explanation=msg,
                                request=req,
                                content_type="text/plain")
        try:
            version_meta = registry.update_version_metadata(req.context,
                                                            id,
                                                            version_meta)

        except exception.Invalid as e:
            msg = (_("Failed to update version metadata. Got error: %s") %
                   utils.exception_to_str(e))
            LOG.warning(msg)
            raise HTTPBadRequest(explanation=msg,
                                 request=req,
                                 content_type="text/plain")
        except exception.NotFound as e:
            msg = (_("Failed to find version to update: %s") %
                   utils.exception_to_str(e))
            LOG.warning(msg)
            raise HTTPNotFound(explanation=msg,
                               request=req,
                               content_type="text/plain")
        except exception.Forbidden as e:
            msg = (_("Forbidden to update version: %s") %
                   utils.exception_to_str(e))
            LOG.warning(msg)
            raise HTTPForbidden(explanation=msg,
                                request=req,
                                content_type="text/plain")
        except (exception.Conflict, exception.Duplicate) as e:
            LOG.warning(utils.exception_to_str(e))
            raise HTTPConflict(body=_('Host operation conflicts'),
                               request=req,
                               content_type='text/plain')
        else:
            self.notifier.info('version.update', version_meta)

        return {'version_meta': version_meta}
|
||||
|
||||
|
||||
class VersionDeserializer(wsgi.JSONRequestDeserializer):
    """Handles deserialization of specific controller method requests."""

    def _deserialize(self, request):
        """Extract the version metadata mapping from *request*."""
        return {"version_meta": utils.get_dict_meta(request)}

    def add_version(self, request):
        """Deserialize an add-version request."""
        return self._deserialize(request)

    def update_version(self, request):
        """Deserialize an update-version request."""
        return self._deserialize(request)
|
||||
|
||||
|
||||
class Versionserializer(wsgi.JSONResponseSerializer):
    """Handles serialization of specific controller method responses."""

    def __init__(self):
        self.notifier = notifier.Notifier()

    def _render(self, response, result):
        """Render the version metadata in *result* as a JSON response.

        Shared by add/delete/get, whose serialization was previously
        triplicated verbatim.
        NOTE(review): status 201 is kept for delete/get to preserve the
        original (non-standard) behavior -- confirm before changing.
        """
        version_meta = result['version_meta']
        response.status = 201
        response.headers['Content-Type'] = 'application/json'
        response.body = self.to_json(dict(version=version_meta))
        return response

    def add_version(self, response, result):
        """Serialize a newly created version."""
        return self._render(response, result)

    def delete_version(self, response, result):
        """Serialize the metadata of a deleted version."""
        return self._render(response, result)

    def get_version(self, response, result):
        """Serialize a fetched version."""
        return self._render(response, result)
|
||||
|
||||
|
||||
def create_resource():
    """versions resource factory method"""
    return wsgi.Resource(Controller(),
                         VersionDeserializer(),
                         Versionserializer())
|
Loading…
x
Reference in New Issue
Block a user