initial merge

Change-Id: Id7cef7826092e191654da872ee1e11c4c6f50ddf
Signed-off-by: Zhijiang Hu <hu.zhijiang@zte.com.cn>
Zhijiang Hu 2016-03-30 14:07:23 +08:00
parent 8f69c4bcc6
commit e2e358b4f8
1770 changed files with 453295 additions and 0 deletions

3
backend/proton/.gitignore vendored Normal file

@@ -0,0 +1,3 @@
# Ignore everything in this directory
*
# Except this file !.gitignore

166
backend/tecs/HA.conf Executable file

@@ -0,0 +1,166 @@
## HA dual-node configuration
# One configuration file per HA system. Naming convention: a single HA system uses HA_1.conf; with two HA systems the names are HA_2_1.conf and HA_2_2.conf, and so on.
# Copy and rename this template before editing. If you edit it with vi, run export LC_ALL="zh_CN.GB2312" first to avoid garbled characters, and unset LC_ALL when done.
[DEFAULT]
# OpenCOS components installed under HA: any comma-separated combination of loadbalance,database,amqp,keystone,neutron,glance,cinder,nova,horizon,heat,ceilometer,ironic (matching the component service-list keys below);
# 'all' is shorthand for all of them, and order does not matter. haproxy stands for configuring the LB.
# Note: HA is installed via the conf method, which cannot install ironic. If ironic is configured here, install ironic separately via the custom method before the overall installation flow.
# This option is mandatory.
components=database,amqp,keystone,neutron,glance,cinder,nova,horizon,heat,ceilometer
# Component services managed by HA (can be trimmed); separate multiple services with commas.
# If no services are added or removed, the options below need no changes, and unused components need not be commented out; whether a component is used is decided by "components".
loadbalance = haproxy
database=mariadb
amqp=rabbitmq-server
keystone=openstack-keystone
# neutron-metadata-agent and neutron-lbaas-agent are not used by default
neutron=neutron-server,neutron-l3-agent,neutron-dhcp-agent
# openstack-glance-scrubber is not used by default
glance=openstack-glance-api,openstack-glance-registry
# openstack-cinder-backup is not used by default
cinder=openstack-cinder-api,openstack-cinder-scheduler,openstack-cinder-volume
nova=openstack-nova-api,openstack-nova-conductor,openstack-nova-scheduler,openstack-nova-cert,openstack-nova-consoleauth,openstack-nova-novncproxy
horizon=httpd,opencos-alarmmanager,opencos-alarmagent
heat=openstack-heat-api,openstack-heat-engine,openstack-heat-api-cfn,openstack-heat-api-cloudwatch
ceilometer=openstack-ceilometer-api,openstack-ceilometer-central,openstack-ceilometer-alarm-evaluator,openstack-ceilometer-alarm-notifier,openstack-ceilometer-notification,openstack-ceilometer-collector
ironic=openstack-ironic-api,openstack-ironic-conductor
# Add clone service resources as needed (they run on every node): the service name without the .service suffix, multiple services separated by commas. Optional.
#clone_service=
# Name of the guard service
guard=tfg-guard
# HA cluster heartbeat links: at least one, three recommended. Each is a pair of IPs separated by a comma.
# If LB and HA run on the same servers, the heartbeat links need not be filled in again here.
# The first heartbeat link (an external IP in this example). Mandatory.
heartbeat_link1=10.43.179.221,10.43.179.222
# The second heartbeat link; must not share an IP with any other heartbeat link. Optional.
heartbeat_link2=
# The third heartbeat link; must not share an IP with any other heartbeat link. Optional.
heartbeat_link3=
# The node where the HA script runs is the local node; the others are remote nodes. This is the root password for ssh logins to the remote nodes. Mandatory.
remote_node_password=ossdbg1
# haproxy floating IP address. Mandatory when configuring the LB.
#loadbalance_fip=192.160.0.226
#loadbalance_nic=ens33
#loadbalance_netmask=23
#############DB################
# Database floating IP; may be the same as the LB floating IP. Mandatory.
# Floating IP address
#database_fip=192.160.0.225
# NIC carrying the floating IP
#database_nic=baseleft
# Netmask (CIDR format)
#database_netmask=23
# Full path of the database shared disk. Mandatory if the component is present.
# An LV-backed disk is recommended; when using LV, configure the logical volume device name.
#database_device=/dev/mapper/vg_mysql-lv_mysql
# Filesystem type
#database_fs_type=ext4
# Full path of the database-backup shared disk; must differ from the other shared disks (feature not yet supported). Optional.
#backup_database_device=/dev/mapper/vg_mysqlbackup-lv_mysqlbackup
#backup_database_fs_type=ext4
##############AMQP################
# AMQP floating IP; may be the same as the LB floating IP. Mandatory.
#amqp_fip=192.160.0.225
#amqp_nic=baseleft
#amqp_netmask=23
##############keystone################
# keystone floating IP. Leave it empty when the LB is configured; otherwise mandatory if the component is present.
#keystone_fip=192.160.0.225
#keystone_nic=baseleft
#keystone_netmask=23
##############neutron################
# neutron floating IP. Leave it empty when the LB is configured; otherwise mandatory if the component is present.
#neutron_fip=192.160.0.225
#neutron_nic=baseleft
#neutron_netmask=23
##############glance################
# glance floating IP. Leave it empty when the LB is configured; otherwise mandatory if the component is present.
#glance_fip=192.160.0.225
#glance_nic=baseleft
#glance_netmask=23
# Image shared-disk settings; must differ from the other shared disks. Mandatory if the component is present.
# glance_device_type can be drbd or iscsi
#glance_device_type=drbd
#glance_device=/dev/mapper/vg_glance-lv_glance
#glance_fs_type=ext4
##############cinder################
# cinder floating IP. Leave it empty when the LB is configured; otherwise mandatory if the component is present.
#cinder_fip=192.160.0.225
#cinder_nic=baseleft
#cinder_netmask=23
# Management IPs of the disk array used by VM block devices; separate multiple IPs with spaces. Optional.
#cinder_ping_ip=192.160.0.7
##############nova################
# nova floating IP. Leave it empty when the LB is configured; otherwise mandatory if the component is present.
#nova_fip=192.160.0.225
#nova_nic=baseleft
#nova_netmask=23
##############horizon################
# Floating IP used to log in to the TECS dashboard. Leave it empty when the LB is configured; otherwise mandatory if the component is present.
# Components with different floating IPs can run on different nodes; to keep this one
# on the same node as another component, configure location_constraint.
#horizon_fip=10.43.179.230
#horizon_nic=kmportv1
#horizon_netmask=23
##############ironic################
# ironic floating IP. Leave it empty when the LB is configured; otherwise mandatory if the component is present.
#ironic_fip=192.160.0.225
#ironic_nic=baseleft
#ironic_netmask=23
##############heat################
# heat floating IP. Leave it empty when the LB is configured; otherwise mandatory if the component is present.
#heat_fip=192.160.0.225
#heat_nic=baseleft
#heat_netmask=23
##############ceilometer################
# ceilometer floating IP. Leave it empty when the LB is configured; otherwise mandatory if the component is present.
#ceilometer_fip=192.160.0.225
#ceilometer_nic=baseleft
#ceilometer_netmask=23
# Full path of the mongod database shared disk. Recommended.
#mongod_device=/dev/mapper/vg_mongodb-lv_mongodb
# Filesystem type
#mongod_fs_type=ext4
# Set to local if the mongod database uses a local disk; otherwise leave it empty.
mongod_local=local
# The following two options describe the disk array holding the shared disks; for now all shared disks used in this file must reside on a single array. Optional.
# Format: (primary controller service-port IP,primary controller iqn),(secondary controller service-port IP,secondary controller iqn)
# If both controllers have the same iqn, this can be configured as just (primary controller service-port IP,primary controller iqn)
#iscsi_storage=(172.32.1.1,iqn.2099-01.cn.com.zte:usp.spr11-4c:09:b4:b0:56:8b),(172.32.1.2,iqn.2099-01.cn.com.zte:usp.spr11-4c:09:b4:b0:56:8c)
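
Not part of the template itself: a minimal Python 2 sketch (matching the code base's Python dialect) of how the constraints documented above, a mandatory components list and heartbeat links that must not share IPs, could be sanity-checked before installation. The file name ha_check.py and all helper names are hypothetical.

# ha_check.py -- hypothetical sanity check for a HA_*.conf file, assuming the
# [DEFAULT] layout documented above.
import sys
import ConfigParser  # Python 2 module, matching the rest of this commit

VALID = set("loadbalance,database,amqp,keystone,neutron,glance,cinder,"
            "nova,horizon,heat,ceilometer,ironic".split(","))

def check(path):
    cp = ConfigParser.ConfigParser()
    cp.read(path)
    components = cp.get("DEFAULT", "components").split(",")
    bad = [c for c in components if c != "all" and c not in VALID]
    if bad:
        raise ValueError("invalid components: %s" % ",".join(bad))
    seen = set()  # heartbeat links must not share any IP
    for key in ("heartbeat_link1", "heartbeat_link2", "heartbeat_link3"):
        try:
            value = cp.get("DEFAULT", key)
        except ConfigParser.NoOptionError:
            continue
        for ip in [ip for ip in value.split(",") if ip]:
            if ip in seen:
                raise ValueError("duplicate heartbeat IP: %s" % ip)
            seen.add(ip)

if __name__ == "__main__":
    check(sys.argv[1])
    print "HA.conf passed basic checks"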

159
backend/tecs/getnodeinfo.sh Executable file

@@ -0,0 +1,159 @@
#!/bin/bash
dhcp_ip="127.0.0.1"
DISCOVERD_URL="http://$dhcp_ip:5050/v1/continue"
function update() {
jq "$1" data.json > temp.json || echo "Error: update $1 to json failed"
mv temp.json data.json
}
function get_system_info(){
PRODUCT=$(dmidecode -s system-product-name)
FAMILY=$(dmidecode -t system|grep "Family"|cut -d ":" -f2)
VERSION=$(dmidecode -s system-version)
SERIAL=$(dmidecode -s system-serial-number)
MANUFACTURER=$(dmidecode -s system-manufacturer)
UUID=$(dmidecode -s system-uuid)
FQDN=$(hostname -f)
echo '{"system":{}}' > data.json
update ".system[\"product\"] = \"$PRODUCT\""
update ".system[\"family\"] = \"$FAMILY\""
update ".system[\"fqdn\"] = \"$FQDN\""
update ".system[\"version\"] = \"$VERSION\""
update ".system[\"serial\"] = \"$SERIAL\""
update ".system[\"manufacturer\"] = \"$MANUFACTURER\""
update ".system[\"uuid\"] = \"$UUID\""
}
function get_cpu_info(){
REAL=$(cat /proc/cpuinfo |grep "physical id"|sort |uniq|wc -l)
TOTAL=$(cat /proc/cpuinfo |grep "processor"|wc -l)
update ".cpu[\"real\"] = $REAL"
update ".cpu[\"total\"] = $TOTAL"
for i in $(seq $TOTAL)
do
if [ ! -z "$i" ]; then
SPEC_MODEL=$(cat /proc/cpuinfo | grep name | cut -f2 -d:|sed -n $i"p")
SPEC_FRE=$(cat /proc/cpuinfo | grep MHz | cut -f2 -d:|sed -n $i"p")
update ".cpu[\"spec_$i\"] = {model:\"$SPEC_MODEL\", frequency:$SPEC_FRE}"
fi
done
}
function get_memory_info(){
PHY_NUM=$(dmidecode -t memory|grep "Physical Memory Array"|wc -l)
TOTAL_MEM=$(cat /proc/meminfo |grep MemTotal |cut -d ":" -f2)
update ".memory[\"total\"] = \"$TOTAL_MEM\""
for num in $(seq $PHY_NUM)
do
SLOTS=$(dmidecode -t memory |grep "Number Of Devices" |cut -d ":" -f2|sed -n $num"p")
MAX_CAP=$(dmidecode -t memory |grep "Maximum Capacity" |cut -d ":" -f2|sed -n $num"p")
update ".memory[\"phy_memory_$num\"] = {slots:\"$SLOTS\", maximum_capacity:\"$MAX_CAP\"}"
for i in $(seq $SLOTS)
do
if [ ! -z "$i" ]; then
DEVICE_FRE=$(dmidecode -t memory |grep "Speed" |cut -d ":" -f2|sed -n $i"p")
DEVICE_TYPE=$(dmidecode -t memory |grep 'Type:' |grep -v "Error Correction Type"|cut -d ":" -f2|sed -n $i"p")
DEVICE_SIZE=$(dmidecode -t memory |grep Size |cut -d ":" -f2|sed -n $i"p")
update ".memory[\"phy_memory_$num\"][\"devices_$i\"] = {frequency:\"$DEVICE_FRE\", type:\"$DEVICE_TYPE\", size:\"$DEVICE_SIZE\"}"
fi
done
done
}
function get_net_info(){
physical_networks=`ls -l /sys/class/net/ | grep -v lo |grep "pci"|awk -F 'net/' '{print $2}'`
if [ -f "/sys/class/net/bonding_masters" ]; then
bond_network=$(cat /sys/class/net/bonding_masters)
if [ ! -z "$bond_network" ];then
physical_networks+=" $bond_network"
fi
fi
for iface in $physical_networks
do
NAME=$iface
MAC=$(ip link show $iface | awk '/ether/ {print $2}')
IP=$(ip addr show $iface | awk '/inet / { sub(/\/.*/, "", $2); print $2 }')
NETMASK=$(ifconfig $iface | grep netmask | awk '{print $4}')
STATE=$(ip link show $iface | awk '/mtu/ {print $3}')
PCI=$(ethtool -i $iface|grep "bus-info"|cut -d " " -f2)
CURRENT_SPEED=$(ethtool $iface |grep Speed |awk -F " " '{print $2}')
LINE=$(ethtool $iface|grep -n "Supported pause frame use"|awk -F ":" '{print $1}')
LINE=$[ LINE - 1 ]
LINE_SPEED=$(ethtool $iface|grep -n "Supported link modes"|awk -F ":" '{print $1}')
BOND=$(ifconfig $iface | grep MASTER)
if [ $LINE -eq $LINE_SPEED ]; then
MAX_SPEED=$(ethtool $iface|grep "Supported link modes"|cut -d ":" -f2)
else
MAX_SPEED=$(ethtool $iface |sed -n $LINE"p"|awk -F " " '{print $1}')
fi
UP="UP"
if [[ "$STATE" =~ "$UP" ]]; then
STATE="up"
else
STATE="down"
fi
if [ -z "$BOND" ]; then
TYPE="ether"
else
TYPE="bond"
SLAVES=$(find /etc/sysconfig/network-scripts/ -name "ifcfg-*" |xargs grep "MASTER=$iface"|awk -F 'ifcfg-' '{print $2}'|awk -F ':' '{print $1}')
fi
if [ ! -z "$MAC" ]; then
update ".interfaces[\"$iface\"] = {mac: \"$MAC\", ip: \"$IP\", netmask: \"$NETMASK\", name: \"$iface\", max_speed: \"$MAX_SPEED\", state: \"$STATE\", pci: \"$PCI\", current_speed: \"$CURRENT_SPEED\", type: \"$TYPE\", slaves:\"$SLAVES\"}"
fi
done
}
function get_disk_info(){
for disk in $(fdisk -l|grep Disk|grep "/dev" |cut -d ":" -f1|awk -F "/" '{print $NF}')
do
DISK_NAME=$disk
DISK_SIZE=$(fdisk -l|grep Disk|grep "/dev" |grep -w $disk|cut -d "," -f2)
DISK_DISK=$(ls -l /dev/disk/by-path/|grep $disk"$"|awk '{print $9}')
DISK_EXTRA_1=$(ls -l /dev/disk/by-id/|grep $disk"$"|awk '{print $9}'|sed -n 1p)
DISK_EXTRA_2=$(ls -l /dev/disk/by-id/|grep $disk"$"|awk '{print $9}'|sed -n 2p)
MODEL=$(hdparm -I /dev/$disk |grep Model | cut -d ":" -f2)
REMOVABLE=$(hdparm -I /dev/$disk |grep removable|awk '{print $4}')
update ".disk[\"$disk\"] = {name: \"$DISK_NAME\", size: \"$DISK_SIZE\", disk: \"$DISK_DISK\", model: \"$MODEL\", removable: \"$REMOVABLE\",extra: [\"$DISK_EXTRA_1\", \"$DISK_EXTRA_2\"]}"
done
}
function main(){
get_system_info
get_cpu_info
get_memory_info
get_net_info
get_disk_info
}
main
BMC_ADDRESS=$(ipmitool lan print | grep -e "IP Address [^S]" | awk '{ print $4 }')
if [ -z "$BMC_ADDRESS" ]; then
BMC_ADDRESS=$(ipmitool lan print 3| grep -e "IP Address [^S]" | awk '{ print $4 }')
fi
update ".ipmi_address = \"$BMC_ADDRESS\""
update ".data_name = \"baremetal_source\""
update ".os_status = \"active\""
echo Collected:
cat data.json
RESULT=$(eval curl -i -X POST \
"-H 'Accept: application/json'" \
"-H 'Content-Type: application/json'" \
"-d @data.json" \
"$DISCOVERD_URL")
if echo $RESULT | grep "HTTP/1.0 4"; then
echo "Ironic API returned error: $RESULT"
fi
echo "Node is now discovered! Halting..."
sleep 5
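
The script above assembles data.json with jq and reports it to ironic-discoverd's /v1/continue endpoint via curl. For reference, a minimal Python 2 sketch of that final reporting step, assuming the same data.json layout and endpoint; the post_inventory name is illustrative, not part of this commit.

# post_inventory.py -- hypothetical re-implementation of the curl call above.
import urllib2

DISCOVERD_URL = "http://127.0.0.1:5050/v1/continue"  # same default as the script

def post_inventory(path="data.json"):
    with open(path) as fp:
        payload = fp.read()
    req = urllib2.Request(DISCOVERD_URL, payload,
                          {"Content-Type": "application/json",
                           "Accept": "application/json"})
    try:
        resp = urllib2.urlopen(req)
        print "discoverd answered %s" % resp.getcode()
    except urllib2.HTTPError as e:
        # mirrors the shell script's check for an HTTP 4xx response
        print "Ironic API returned error: %s %s" % (e.code, e.read())

if __name__ == "__main__":
    post_inventory()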

@@ -0,0 +1,39 @@
[
{
"protocol_type": "ISCSI",
"service": "glance",
"lun": "0",
"data_ips": [
"10.43.177.159"
],
"lvm_config": {
"size": 100,
"vg_name": "VolGroupHAImage",
"lv_name": "lvHAImage",
"fs_type": "ext4"
}
},
{
"protocol_type": "ISCSI",
"service": "db",
"lun": "1",
"data_ips": [
"162.1.1.101"
],
"lvm_config": {
"size": 100,
"vg_name": "VolGroupHAMysql",
"lv_name": "lvHAMysql",
"fs_type": "ext4"
}
},
{
"protocol_type": "CEPH",
"rbd_config": {
"size": 100,
"rbd_pool": "mysql",
"rbd_volume": "mysql",
"fs_type": "ext4" # can be none
}
}
]
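
Each entry above selects a backend through protocol_type (ISCSI or CEPH); note that the "#" comment on fs_type is documentation only, since JSON itself allows no comments. Below is a minimal sketch of the driver loop that consumes this file, mirroring config_share_disk() in storage_auto_config.py later in this commit; the invocation values are illustrative.

# hypothetical driver loop over base/control.json
import json
from common.share_disk import BaseShareDisk  # added later in this commit

def deploy(path, host_name):
    with open(path) as fp:
        items = json.load(fp)
    for item in items:
        # dispatches to ISCSIShareDisk or CEPHShareDisk by protocol_type
        BaseShareDisk.single_instance().deploy_share_disk(item, host_name)

if __name__ == "__main__":
    deploy("base/control.json", "host-1")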

@@ -0,0 +1,144 @@
# This is a basic configuration file with some examples, for device mapper
# multipath.
## Use user friendly names, instead of using WWIDs as names.
defaults {
user_friendly_names yes
queue_without_daemon no
# find_multipaths yes
}
##
## Here is an example of how to configure some standard options.
##
#
#defaults {
# udev_dir /dev
# polling_interval 10
# selector "round-robin 0"
# path_grouping_policy multibus
# getuid_callout "/lib/udev/scsi_id --whitelisted --device=/dev/%n"
# prio alua
# path_checker readsector0
# rr_min_io 100
# max_fds 8192
# rr_weight priorities
# failback immediate
# no_path_retry fail
# user_friendly_names yes
#}
##
## The wwid line in the following blacklist section is shown as an example
## of how to blacklist devices by wwid. The 2 devnode lines are the
## compiled in default blacklist. If you want to blacklist entire types
## of devices, such as all scsi devices, you should use a devnode line.
## However, if you want to blacklist specific devices, you should use
## a wwid line. Since there is no guarantee that a specific device will
## not change names on reboot (from /dev/sda to /dev/sdb for example)
## devnode lines are not recommended for blacklisting specific devices.
##
#blacklist {
# wwid 26353900f02796769
# devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*"
# devnode "^hd[a-z]"
#}
#multipaths {
# multipath {
# wwid 3600508b4000156d700012000000b0000
# alias yellow
# path_grouping_policy multibus
# path_checker readsector0
# path_selector "round-robin 0"
# failback manual
# rr_weight priorities
# no_path_retry 5
# }
# multipath {
# wwid 1DEC_____321816758474
# alias red
# }
#}
#devices {
# device {
# vendor "COMPAQ "
# product "HSV110 (C)COMPAQ"
# path_grouping_policy multibus
# getuid_callout "/lib/udev/scsi_id --whitelisted --device=/dev/%n"
# path_checker readsector0
# path_selector "round-robin 0"
# hardware_handler "0"
# failback 15
# rr_weight priorities
# no_path_retry queue
# }
# device {
# vendor "COMPAQ "
# product "MSA1000 "
# path_grouping_policy multibus
# }
#}
devices {
device {
vendor "FUJITSU"
product "ETERNUS_DXL"
prio alua
path_grouping_policy group_by_prio
path_selector "round-robin 0"
failback immediate
no_path_retry 0 (*1)
path_checker tur
dev_loss_tmo 2097151 (*2)
fast_io_fail_tmo 1
}
device {
vendor "FUJITSU"
product "ETERNUS_DXM"
prio alua
path_grouping_policy group_by_prio
path_selector "round-robin 0"
failback immediate
no_path_retry 0 (*1)
path_checker tur
dev_loss_tmo 2097151 (*2)
fast_io_fail_tmo 1
}
device {
vendor "FUJITSU"
product "ETERNUS_DX400"
prio alua
path_grouping_policy group_by_prio
path_selector "round-robin 0"
failback immediate
no_path_retry 0 (*1)
path_checker tur
dev_loss_tmo 2097151 (*2)
fast_io_fail_tmo 1
}
device {
vendor "FUJITSU"
product "ETERNUS_DX8000"
prio alua
path_grouping_policy group_by_prio
path_selector "round-robin 0"
failback immediate
no_path_retry 0 (*1)
path_checker tur
dev_loss_tmo 2097151 (*2)
fast_io_fail_tmo 1
}
device {
vendor "ZTE"
product "ZXUSP"
path_grouping_policy group_by_prio
path_checker tur
prio alua
path_selector "round-robin 0"
hardware_handler "1 alua"
failback immediate
rr_weight priorities
no_path_retry 0 (*1)
rr_min_io_rq 1
flush_on_last_del yes
}
}
blacklist {
}

@@ -0,0 +1,281 @@
import uuid
from utils import *
from xml.etree.ElementTree import ElementTree, Element
class BaseConfig():
_CINDER_CONF_PATH = "/etc/cinder/cinder.conf"
SET_CONFIG = \
"openstack-config --set {config_file} {section} {key} {value}"
GET_CONFIG = \
"openstack-config --get {config_file} {section} {key}"
instance = None
def __init__(self):
self._BACKEND_MAPPING = {
'KS3200_IPSAN': ZTEBackendConfig,
'KS3200_FCSAN': ZTEBackendConfig,
'FUJISTU_ETERNUS': FUJISTUBackendConfig,
'LVM': None,
'CEPH': CEPHBackendConfig,
}
self.instance_mapping = {}
def __get_backend_instance(self, backend_type):
if not backend_type or \
backend_type not in self._BACKEND_MAPPING.keys():
print_or_raise("Volume driver type '%s' is not valid." %
backend_type,
ScriptInnerError)
backend_instance = self.instance_mapping.get(backend_type, BaseConfig)
if isinstance(backend_instance, self._BACKEND_MAPPING[backend_type]):
return backend_instance
else:
self.instance_mapping.update(
{backend_type: self._BACKEND_MAPPING[backend_type]()})
return self.instance_mapping[backend_type]
@classmethod
def single_instance(cls):
if not BaseConfig.instance:
BaseConfig.instance = BaseConfig()
return BaseConfig.instance
def _construct_particular_cinder_data(self, backend, backend_data):
print_or_raise("Backend _construct_particular_cinder_data method no "
"implement!", ScriptInnerError)
def _write_xml(self, fp_xml, **backend_device_args):
self.backend_instance._write_xml(fp_xml, **backend_device_args)
def _construct_commonality_cinder_data(self, backend, backend_data):
backend_pools, xml_path = \
self.backend_instance._construct_particular_cinder_data(
backend, backend_data)
backend_data['volume_backend_name'] = \
backend_data.pop('volume_type')
set_backend = lambda x, y: self.SET_CONFIG.format(
config_file=self._CINDER_CONF_PATH,
section=backend,
key=x, value=y)
backend_config_list = list()
backend_config_list += map(
set_backend, backend_data.keys(), backend_data.values())
get_backends = \
self.GET_CONFIG.format(config_file=self._CINDER_CONF_PATH,
section="DEFAULT",
key="enabled_backends")
out, err = execute(get_backends, check_exit_code=[0, 1])
exist_backends = out.split("\n")[0] if out else ""
enabled_backends = \
exist_backends if backend in exist_backends else \
"%s" % backend if not out else "%s,%s" % \
(exist_backends, backend)
set_backends = \
self.SET_CONFIG.format(config_file=self._CINDER_CONF_PATH,
section="DEFAULT",
key="enabled_backends",
value=enabled_backends)
# write to cinder.conf
config_set_all = set_backends + ";" + ";".join(backend_config_list)
execute(config_set_all)
return backend_pools, xml_path
def is_needed_generate_backend_xml(self, backend_driver):
if backend_driver in ['KS3200_IPSAN', 'KS3200_FCSAN',
'FUJISTU_ETERNUS']:
return True
else:
return False
def config_backend(self, backend_cinder_args, **backend_device_args):
"""
Public configuration entry point; drives the common flow.
:param backend_device_args: device config
:param backend_cinder_args: backend config
:return:
"""
backend_data = backend_cinder_args[1]
backend_driver = backend_data.get('volume_driver', None)
self.backend_instance = self.__get_backend_instance(backend_driver)
# config cinder.conf
backend_pools, xml_path = \
self._construct_commonality_cinder_data(backend_cinder_args[0],
backend_data)
# config xml
if self.is_needed_generate_backend_xml(backend_driver):
backend_device_args.update({'pools': backend_pools})
with open(xml_path, "w+") as fp_xml:
self._write_xml(fp_xml, **backend_device_args)
execute("chown cinder:cinder %s" % xml_path)
def update_xml_node(self, element_obj, node_path, content):
node_list = element_obj.findall(node_path)
if node_list:
node_list[0].text = content
else:
new_element = Element(node_path.split('/')[-1])
new_element.text = content
parent_node = element_obj.findall(node_path.split('/')[0])
parent_node[0].append(new_element)
class ZTEBackendConfig(BaseConfig):
_DEFAULT_USERNAME = "admin"
_DEFAULT_USERPWD = "admin"
_DEFAULT_XML_FILE_PREFIX = "cinder_zte_conf_file"
_DEFAULT_XML_TEMPLATE_PATH = "/etc/cinder/cinder_zte_conf.xml"
_ISCSI_DRIVER = 'cinder.volume.drivers.zte.zte_ks.ZteISCSIDriver'
_FC_DRIVER = 'cinder.volume.drivers.zte.zte_ks.ZteFCDriver'
def _construct_particular_cinder_data(self, backend, backend_data):
# construct commonality data in cinder.conf
backend_data['volume_driver'] = \
self._ISCSI_DRIVER \
if "KS3200_IPSAN" == backend_data['volume_driver'] \
else self._FC_DRIVER
backend_data[self._DEFAULT_XML_FILE_PREFIX] = \
backend_data.pop('backend_config_file') \
if backend_data.get('backend_config_file', None) \
else "/etc/cinder/%s_%s.xml" % (self._DEFAULT_XML_FILE_PREFIX,
backend)
backend_data['use_multipath_for_image_xfer'] = \
backend_data.get('multipath_tool', True)
backend_pools = backend_data.pop('pools')
return backend_pools, backend_data[self._DEFAULT_XML_FILE_PREFIX]
def _write_xml(self, fp, **backend_device_args):
if not os.path.exists(self._DEFAULT_XML_TEMPLATE_PATH):
print_or_raise("XML file template %s not exists,can't load defult "
"params." % self._DEFAULT_XML_TEMPLATE_PATH,
ScriptInnerError)
mgnt_ips = backend_device_args['management_ips']
user_name = backend_device_args['user_name']
user_pwd = backend_device_args['user_pwd']
cinder_host_ip = backend_device_args['cinder_host_ip']
pools = backend_device_args['pools']
xml_fp = fp
tree = ElementTree()
elements = tree.parse(self._DEFAULT_XML_TEMPLATE_PATH)
for index in range(len(mgnt_ips)):
self.update_xml_node(
elements,
"Storage/ControllerIP" + str(index), mgnt_ips[index])
if cinder_host_ip:
self.update_xml_node(elements, "Storage/LocalIP", cinder_host_ip)
self.update_xml_node(elements, "Storage/UserName", user_name)
self.update_xml_node(elements, "Storage/UserPassword", user_pwd)
# del all StoragePool and StorageVd node
pool_parent_node = elements.findall("LUN")
pool_child_nodes = elements.findall("LUN/StoragePool")
vd_child_nodes = elements.findall("LUN/StorageVd")
map(pool_parent_node[0].remove, pool_child_nodes + vd_child_nodes)
# add StoragePool node base on pools
for pool in pools:
element = Element("StoragePool")
element.text = pool
element.tail = "\n\t"
pool_parent_node[0].insert(0, element)
tree.write(xml_fp, encoding="utf-8", xml_declaration=True)
class FUJISTUBackendConfig(BaseConfig):
_DEFAULT_USERNAME = "root"
_DEFAULT_USERPWD = "root"
_DEFAULT_XML_FILE_PREFIX = "cinder_eternus_config_file"
_DEFAULT_XML_TEMPLATE_PATH = \
"/etc/cinder/cinder_fujitsu_eternus_dx.xml"
FUJISTU_DRIVER = \
"cinder.volume.drivers.fujitsu.eternus_dx_iscsi.FJDXISCSIDriver"
def _construct_particular_cinder_data(self, backend, backend_data):
# construct commonality data in cinder.conf
backend_data['volume_driver'] = self.FUJISTU_DRIVER
backend_data[self._DEFAULT_XML_FILE_PREFIX] = \
backend_data.pop('backend_config_file') \
if backend_data.get('backend_config_file', None) \
else "/etc/cinder/%s_%s.xml" % (self._DEFAULT_XML_FILE_PREFIX,
backend)
backend_data['use_multipath_for_image_xfer'] = \
backend_data.get('multipath_tool', True)
backend_data['use_fujitsu_image_volume'] = \
backend_data.get('use_fujitsu_image_volume', True)
backend_data['fujitsu_min_image_volume_per_storage'] = \
backend_data.get('fujitsu_min_image_volume_per_storage', 1)
backend_data['fujitsu_image_management_dir'] = \
backend_data.get('fujitsu_image_management_dir',
'/var/lib/glance/conversion')
backend_pools = backend_data.pop('pools')
return backend_pools, backend_data[self._DEFAULT_XML_FILE_PREFIX]
def _write_xml(self, fp, **backend_device_args):
if not os.path.exists(self._DEFAULT_XML_TEMPLATE_PATH):
print_or_raise("XML file template %s not exists,can't load defult "
"params." % self._DEFAULT_XML_TEMPLATE_PATH,
ScriptInnerError)
mgnt_ip = backend_device_args['management_ips'][0]
data_ips = backend_device_args['data_ips']
user_name = backend_device_args['user_name']
user_pwd = backend_device_args['user_pwd']
pool = backend_device_args['pools'][0]
xml_fp = fp
tree = ElementTree()
elements = tree.parse(self._DEFAULT_XML_TEMPLATE_PATH)
self.update_xml_node(elements, "EternusIP", mgnt_ip)
self.update_xml_node(elements, "EternusUser", user_name)
self.update_xml_node(elements, "EternusPassword", user_pwd)
self.update_xml_node(elements, "EternusPool", pool)
self.update_xml_node(elements, "EternusSnapPool", pool)
root = tree.getroot()
map(root.remove, root.findall("EternusISCSIIP"))
for ip in data_ips:
element = Element("EternusISCSIIP")
element.text = ip
element.tail = "\n"
root.insert(4, element)
# root.append(element)
tree.write(xml_fp, encoding="utf-8", xml_declaration=True)
class CEPHBackendConfig(BaseConfig):
NOVA_CONF_FILE = "/etc/nova/nova.conf"
GLANCE_API_CONF_FILE = "/etc/glance/glance-api.conf"
_RBD_STORE_USER = "cinder"
_RBD_POOL = "volumes"
_RBD_MAX_CLONE_DEPTH = 5
_RBD_FLATTEN_VOLUME_FROM_SNAPSHOT = "False"
_RBD_CEPH_CONF = "/etc/ceph/ceph.conf"
_RBD_DRIVER = 'cinder.volume.drivers.rbd.RBDDriver'
def _construct_particular_cinder_data(self, backend, backend_data):
backend_data['volume_driver'] = self._RBD_DRIVER
backend_data['rbd_pool'] = self._RBD_POOL
backend_data['rbd_max_clone_depth'] = self._RBD_MAX_CLONE_DEPTH
backend_data['rbd_flatten_volume_from_snapshot'] = \
self._RBD_FLATTEN_VOLUME_FROM_SNAPSHOT
backend_data['rbd_ceph_conf'] = self._RBD_CEPH_CONF
uuid_instance = uuid.uuid3(uuid.NAMESPACE_DNS, "zte.com.cn")
backend_data['rbd_secret_uuid'] = uuid_instance.urn.split(":")[2]
return [], []
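
A minimal usage sketch for the classes above, mirroring how config_cinder() in storage_auto_config.py (later in this commit) drives them; the section name, pool, and addresses are illustrative values.

# hypothetical call site for BaseConfig
backend_item = ("eternus_backend", {      # section name written to cinder.conf
    "volume_driver": "FUJISTU_ETERNUS",   # key into _BACKEND_MAPPING
    "volume_type": "eternus_iscsi",       # becomes volume_backend_name
    "pools": ["pool0"],
})
BaseConfig.single_instance().config_backend(
    backend_item,
    management_ips=["192.0.2.10"],
    data_ips=["192.0.2.11"],
    user_name="root",
    user_pwd="root",
    cinder_host_ip="192.0.2.20")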

@@ -0,0 +1,312 @@
from utils import *
class BaseShareDisk():
instance = None
def __init__(self):
self._PROTOCOL_MAPPING = {
'ISCSI': ISCSIShareDisk,
'CEPH': CEPHShareDisk
}
self.instance_mapping = {}
def __get_protocol_instance(self, protocol_type):
if not protocol_type or \
protocol_type not in self._PROTOCOL_MAPPING.keys():
print_or_raise("Protocol type '%s' is not valid." % protocol_type,
ScriptInnerError)
protocol_instance = self.instance_mapping.get(protocol_type,
BaseShareDisk)
if isinstance(protocol_instance,
self._PROTOCOL_MAPPING[protocol_type]):
return protocol_instance
else:
self.instance_mapping.update(
{protocol_type: self._PROTOCOL_MAPPING[protocol_type]()})
return self.instance_mapping[protocol_type]
@classmethod
def single_instance(cls):
if not BaseShareDisk.instance:
BaseShareDisk.instance = BaseShareDisk()
return BaseShareDisk.instance
def deploy_share_disk(self, item, host_name):
protocol_instance = self.__get_protocol_instance(
item.get('protocol_type', 'ISCSI'))
protocol_instance.deploy_share_disk(item, host_name)
class ISCSIShareDisk(BaseShareDisk):
_LV_DEFAULT_NAME = {
'glance': ("VolGroupHAImage", "lvHAImage", 254),
'db': ("VolGroupHAMysql", "lvHAMysql", 253),
'db_backup': ("VolGroupHABakMysql", "lvHABakMysql", 252),
'mongodb': ("VolGroupHAMongodb", "lvHAMongodb", 251),
}
def _get_iscsi_configs(self, record_list):
raid_config = {}
for record in record_list:
discovery_media_ip = record.split(" ")[0].split(":")[0]
discovery_media_iqn = record.split(" ")[1]
try:
execute("ping -c 1 -W 2 %s" % discovery_media_ip)
except ProcessExecutionError:
execute("iscsiadm -m node -T %s -p %s -o delete" %
(discovery_media_iqn, discovery_media_ip),
check_exit_code=[0, 1])
continue
if discovery_media_ip in raid_config.get(discovery_media_iqn, []):
execute("iscsiadm -m node -T %s -p %s -R" %
(discovery_media_iqn, discovery_media_ip),
check_exit_code=[0, 1])
elif discovery_media_iqn in raid_config.keys():
raid_config[discovery_media_iqn] += [discovery_media_ip]
else:
raid_config[discovery_media_iqn] = [discovery_media_ip]
print_or_raise("Raid config is:\n%s" % str(raid_config))
return raid_config
def _lv_reentrant_check(
self, vg_name, lv_name, iscsi_session_setup, lun=None,
data_ips=[]):
"""
Check if share disk operation is reentrant.
:return:True,continue follow action; False, do nothing.
"""
lv_device_path = "/dev/%s/%s" % (vg_name, lv_name)
if not os.path.exists(lv_device_path):
return True
if not iscsi_session_setup:
exist_volumes = \
[sd for sd in self._ls_sd_path() if "-lun-" + lun in sd
for ip in data_ips if "ip-" + ip in sd]
if not exist_volumes:
print_or_raise("Lvm %s is exist, but no sd device match!" %
lv_device_path, ScriptInnerError)
return False
def _lv_rollback(self, lv, vg, block_device):
try:
execute("lvremove -y -ff /dev/%s/%s" % (lv, vg),
check_exit_code=[0, 1, 5])
execute("vgremove -y -ff %s" % vg, check_exit_code=[0, 1, 5])
execute("pvremove -y -ff %s" % block_device,
check_exit_code=[0, 1, 5])
except Exception as e:
print_or_raise("Rollback lvm resource failed!", e)
def _establish_iscsi_session(self, available_data_ips):
# discovery
discovery_ret = ""
for ip in available_data_ips:
out, err = execute(
"iscsiadm -m discovery -t st -p %s:3260" % ip)
discovery_ret += out
# if('0' != err) or ('0\n' != err ) or err:
# print_or_raise("Discovery ip:%s failed,continue.." % ip)
if not discovery_ret:
print_or_raise("No discovery record!", ScriptInnerError)
record_list = list(set(discovery_ret.split('\n')[:-1]))
print_or_raise(
"Discovery successful! Record:\n%s" % "\n".join(record_list))
# get iqn and ip like {iqn1: ip1, iqn2:ip2}
raid_config = self._get_iscsi_configs(record_list)
# auto config & login
login_cmd = \
lambda x, y: "iscsiadm -m node -T %s -p %s:3260 -l" % (x, y)
auto_cmd = \
lambda x, y: "iscsiadm -m node -T %s -p %s -o update -n " \
"node.startup -v automatic" % (x, y)
login = []
auto_config = []
for index in range(len(raid_config.keys())):
k = raid_config.keys()[index]
v = raid_config[k]
login += map(login_cmd, [k] * len(v), v)
auto_config += map(auto_cmd, [k] * len(v), v)
execute(";".join(login))
execute(";".join(auto_config))
print_or_raise("Login successful!")
return raid_config
def _modify_host_iqn(self, host_name):
# modify host IQN
host_iqn, err = execute("cat /etc/iscsi/initiatorname.iscsi")
md5_str, err = execute("echo -n %s | openssl md5" % host_name)
host_iqn = host_iqn.split("=")[1].strip()
wish_iqn = "iqn.opencos.rh:" + md5_str.split("=")[1].strip()
if wish_iqn != host_iqn:
print_or_raise(
"The host iqn is %s, but the desired iqn is %s; it will be modified."
% (host_iqn, wish_iqn))
with open("/etc/iscsi/initiatorname.iscsi", "w") as fp:
fp.write("InitiatorName=" + wish_iqn + "\n")
execute("systemctl restart iscsid.service")
def _ls_sd_path(self):
out, err = execute("ls /dev/disk/by-path")
return out.split("\n")[:-1]
def _find_multipath_by_sd(self, iqns, lun_id):
sd_path = []
attemps = 0
while not sd_path:
sd_path = \
[sd for sd in self._ls_sd_path()
if filter(lambda complex_sd_path: complex_sd_path in sd,
[iqn + "-lun-" + str(lun_id) for iqn in iqns])]
attemps += 1
if attemps == 5:
execute("iscsiadm -m node -R")
elif attemps > 10:
print_or_raise(
"After login successful,"
"there is no local sd device match with block device.",
ScriptInnerError)
time.sleep(2)
sd_path = "/dev/disk/by-path/" + sd_path[0]
sd_real_path = os.path.realpath(sd_path)
attemps = 0
multipath_path = ""
while not os.path.exists(multipath_path):
multipath_device, err = execute("multipath -l %s" % sd_real_path)
# if not multipath_device or ('0' != err) or ('0\n' != err) or err:
# continue
multipath_path = "/dev/mapper/" + \
multipath_device.split("\n")[0].split(" ")[0]
attemps += 1
if attemps > 5:
print_or_raise(
"No multipath match with local sd device:%s." %
sd_real_path,
ScriptInnerError)
time.sleep(2)
return multipath_path
def _create_lv_by_multipath_device(
self, multipath, vg_name, lv_name, size, fs_type):
try:
# create lvm base on block device
execute("pvcreate -y -ff %s" % multipath,
check_exit_code=[0, 1, 5])
execute("vgcreate -y -ff %s %s" % (vg_name, multipath),
check_exit_code=[0, 1, 5])
if size == -1:
lvcreate = "lvcreate -W y -l 100%%FREE -n %s %s" % \
(lv_name, vg_name)
else:
lvcreate = "lvcreate -W y -L %sG -n %s %s" % \
(round(size * 0.95, 2), lv_name, vg_name)
execute(lvcreate, check_exit_code=[0, 1, 5])
execute("pvscan --cache --activate ay")
# make filesystem
execute("mkfs.%s /dev/%s/%s" % (fs_type, vg_name, lv_name))
except Exception as e:
self._lv_rollback(lv_name, vg_name, multipath)
print_or_raise("LVM create failed, resource has been rollbacked.",
e)
def deploy_share_disk(self, item, host_name):
config_computer()
self._modify_host_iqn(host_name)
service = item['service']
if service not in ['glance', 'db', 'db_backup', 'mongodb']:
print_or_raise("Service name '%s' is not valid." % service)
# check ip
available_data_ips, invalid_ips = \
get_available_data_ip(item['data_ips'])
if not available_data_ips:
print_or_raise("No valid data ips,please check.", ScriptInnerError)
raid_config = self._establish_iscsi_session(available_data_ips)
lv_config = item.get('lvm_config', None)
vg_name = lv_config.get('vg_name', self._LV_DEFAULT_NAME[service][0])
lv_name = lv_config.get('lv_name', self._LV_DEFAULT_NAME[service][1])
if not self._lv_reentrant_check(vg_name, lv_name, True):
return
multipath = self._find_multipath_by_sd(
raid_config.keys(),
item.get('lun', self._LV_DEFAULT_NAME[service][2]))
self._create_lv_by_multipath_device(multipath,
vg_name,
lv_name,
lv_config.get('size', -1),
lv_config.get('fs_type', 'ext4'))
class CEPHShareDisk(BaseShareDisk):
def __init__(self):
self.monitor_ip = ''
self.monitor_passwd = ''
def deploy_share_disk(self, item, host_name):
self.monitor_ip = item.get('monitor_ip', '')
self.monitor_passwd = item.get('monitor_passwd', '')
rbd_pool = item['rbd_config']['rbd_pool']
rbd_img = item['rbd_config']['rbd_volume']
img_size = int(item['rbd_config']['size'])*1024
fs_type = item['rbd_config'].get('fs_type', 'ext4')
cmd_create = 'sshpass -p %s ssh %s rbd create -p %s --size %s %s ' % \
(self.monitor_passwd,
self.monitor_ip,
rbd_pool,
img_size,
rbd_img)
cmd_query = 'sshpass -p %s ssh %s rbd ls -l %s' % (
self.monitor_passwd, self.monitor_ip, rbd_pool)
image_in_monitor = []
print_or_raise("Create image %s in pool %s at monitor %s." %
(rbd_img, rbd_pool, self.monitor_ip))
try:
out, err = execute(cmd_query)
if out:
for line in out.splitlines():
image_in_monitor.append(line.split()[0])
if rbd_img not in image_in_monitor:
execute(cmd_create)
except Exception as e:
print_or_raise("Query pool %s in monitor error or create image %s "
"in pool %s." % (rbd_pool, rbd_img, rbd_pool), e)
execute("systemctl stop rbdmap")
rbd_map = '%s/%s id=admin,' \
'keyring=/etc/ceph/ceph.client.admin.keyring' % (rbd_pool,
rbd_img)
rbd_map_need_to_write = True
print_or_raise("Write rbdmap.")
with open("/etc/ceph/rbdmap", "a+") as fp:
for line in fp:
if line == rbd_map + "\n":
rbd_map_need_to_write = False
if rbd_map_need_to_write is True:
fp.write(rbd_map + "\n")
execute("chmod 777 /etc/ceph/rbdmap")
execute("systemctl enable rbdmap")
execute("systemctl start rbdmap")
execute("mkfs.%s /dev/rbd/%s/%s" % (fs_type, rbd_pool, rbd_img))

@@ -0,0 +1,231 @@
import subprocess
import random
import shlex
import signal
import time
import os
import logging
LOG = logging.getLogger()
formatter = "%(asctime)s %(name)s %(levelname)s %(message)s"
logging.basicConfig(format=formatter,
filename="storage_auto_config.log",
filemode="a",
level=logging.DEBUG)
stream_log = logging.StreamHandler()
stream_log.setLevel(logging.DEBUG)
stream_log.setFormatter(logging.Formatter(formatter))
LOG.addHandler(stream_log)
def print_or_raise(msg, exc=None):
if not exc:
LOG.debug(msg)
else:
if isinstance(exc, Exception):
LOG.error(msg)
raise exc
elif issubclass(exc, Exception):
raise exc(msg)
class ScriptInnerError(Exception):
def __init__(self, message=None):
super(ScriptInnerError, self).__init__(message)
class UnknownArgumentError(Exception):
def __init__(self, message=None):
super(UnknownArgumentError, self).__init__(message)
class NoRootWrapSpecified(Exception):
def __init__(self, message=None):
super(NoRootWrapSpecified, self).__init__(message)
class ProcessExecutionError(Exception):
def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
description=None):
self.exit_code = exit_code
self.stderr = stderr
self.stdout = stdout
self.cmd = cmd
self.description = description
if description is None:
description = "Unexpected error while running command."
if exit_code is None:
exit_code = '-'
message = ("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r"
% (description, cmd, exit_code, stdout, stderr))
super(ProcessExecutionError, self).__init__(message)
def execute(cmd, **kwargs):
"""Helper method to shell out and execute a command through subprocess.
Allows optional retries.
:param cmd: Passed to subprocess.Popen.
:type cmd: string
TODO:param process_input: Send to opened process.
:type process_input: string
TODO:param check_exit_code: Single bool, int, or list of allowed exit
codes. Defaults to [0]. Raise
:class:`ProcessExecutionError` unless
program exits with one of these codes.
:type check_exit_code: boolean, int, or [int]
:param delay_on_retry: True | False. Defaults to True. If set to True,
wait a short amount of time before retrying.
:type delay_on_retry: boolean
:param attempts: How many times to retry cmd.
:type attempts: int
TODO:param run_as_root: True | False. Defaults to False. If set to True,
the command is prefixed by the command specified
in the root_helper kwarg.
:type run_as_root: boolean
:param root_helper: command to prefix to commands called with
run_as_root=True
:type root_helper: string
TODO:param shell: whether or not there should be a shell used to
execute this command. Defaults to false.
:type shell: boolean
:param loglevel: log level for execute commands.
:type loglevel: int. (Should be logging.DEBUG or logging.INFO)
:returns: (stdout, stderr) from process execution
:raises: :class:`UnknownArgumentError` on
receiving unknown arguments
:raises: :class:`ProcessExecutionError`
"""
def _subprocess_setup():
# Python installs a SIGPIPE handler by default.
# This is usually not what non-Python subprocesses expect.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
# stdin
process_input = kwargs.pop('process_input', None)
check_exit_code = kwargs.pop('check_exit_code', [0])
ignore_exit_code = False
delay_on_retry = kwargs.pop('delay_on_retry', True)
attempts = kwargs.pop('attempts', 1)
run_as_root = kwargs.pop('run_as_root', False)
root_helper = kwargs.pop('root_helper', '')
shell = kwargs.pop('shell', True)
silent = kwargs.pop('silent', False)
# loglevel = kwargs.pop('loglevel', logging.DEBUG)
if isinstance(check_exit_code, bool):
ignore_exit_code = not check_exit_code
check_exit_code = [0]
elif isinstance(check_exit_code, int):
check_exit_code = [check_exit_code]
if kwargs:
raise UnknownArgumentError(
'Got unknown keyword args to utils.execute: %r' % kwargs)
if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0:
if not root_helper:
raise NoRootWrapSpecified(
message=('Command requested root, but did not specify a root '
'helper.'))
cmd = shlex.split(root_helper) + list(cmd)
while attempts > 0:
attempts -= 1
try:
if not silent:
print_or_raise('Running cmd (subprocess): %s' % cmd)
# windows
if os.name == 'nt':
preexec_fn = None
close_fds = False
else:
preexec_fn = _subprocess_setup
close_fds = True
obj = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=close_fds,
preexec_fn=preexec_fn,
shell=shell)
if process_input is not None:
result = obj.communicate(process_input)
else:
result = obj.communicate()
obj.stdin.close()
_returncode = obj.returncode
if not silent:
print_or_raise('Result was %s' % _returncode)
if not ignore_exit_code and _returncode not in check_exit_code:
(stdout, stderr) = result
raise ProcessExecutionError(exit_code=_returncode,
stdout=stdout,
stderr=stderr,
cmd=cmd)
# cmd=sanitized_cmd)
return result
except ProcessExecutionError:
if not attempts:
raise
else:
if not silent:
print_or_raise('%r failed. Retrying.' % cmd)
if delay_on_retry:
time.sleep(random.randint(20, 200) / 100.0)
finally:
time.sleep(0)
def get_available_data_ip(media_ips):
unavailable_ip = []
for media_ip in media_ips:
try:
execute("ping -c 1 -W 2 %s" % media_ip)
except ProcessExecutionError:
unavailable_ip.append(media_ip)
continue
return list(set(media_ips) - set(unavailable_ip)), unavailable_ip
def clear_host_iscsi_resource():
out, err = execute("iscsiadm -m node", check_exit_code=[0, 21])
if not out:
return
sd_ips_list = map(lambda x: x.split(":3260")[0], out.split("\n")[:-1])
if not sd_ips_list:
return
valid_ips, invalid_ips = get_available_data_ip(sd_ips_list)
clear_resource = ""
for ip in invalid_ips:
logout_session = "iscsiadm -m node -p %s -u;" % ip
del_node = "iscsiadm -m node -p %s -o delete;" % ip
# manual_startup = "iscsiadm -m node -p %s -o update -n node.startup "
# "-v manual;" % ip
clear_resource += (logout_session + del_node)
execute(clear_resource, check_exit_code=[0, 21], silent=True)
# _execute("multipath -F")
def config_computer():
# remove exist iscsi resource
clear_host_iscsi_resource()
config_multipath()
def config_multipath():
if os.path.exists("/etc/multipath.conf"):
execute("echo y|mv /etc/multipath.conf /etc/multipath.conf.bak",
check_exit_code=[0, 1])
execute("cp -p base/multipath.conf /etc/")
execute("systemctl enable multipathd.service;"
"systemctl restart multipathd.service")

@@ -0,0 +1,168 @@
###############################################################################
# Author: CG
# Description:
# 1.The script should be copied to the host before running.
# 2.The script is not thread safe.
# 3.Example for script call:
# [config share disk]:
# python storage_auto_config share_disk <host_pxe_mac>,
# we use host_pxe_mac to generate host IQN by md5 and write it to
# '/etc/iscsi/initiatorname.iscsi'
# [config cinder]: python storage_auto_config cinder_conf 10.43.177.129,
# the second parameter of cinder_conf is the cinder <host_ip>.
# If the backend is CEPH,you should call the following command:
# python storage_auto_config glance_rbd_conf at glance node &
# python storage_auto_config nova_rbd_conf at nova node.
# [config multipath]:python storage_auto_config check_multipath.
# 4.Before running the script, the cinder.json and control.json files
# must be configured.
###############################################################################
import sys
import uuid
import traceback
from common.utils import *
from common.cinder_conf import BaseConfig, CEPHBackendConfig
from common.share_disk import BaseShareDisk
try:
import simplejson as json
except ImportError:
import json
def _set_config_file(file, section, key, value):
set_config = BaseConfig.SET_CONFIG.format(
config_file=file,
section=section,
key=key,
value=value)
execute(set_config)
def config_share_disk(config, host_name):
# deploy share_disk
for item in config:
BaseShareDisk.single_instance().deploy_share_disk(item, host_name)
def config_cinder(config, cinder_host_ip=""):
# config xml and cinder.conf
for config in config['disk_array']:
# load disk array global config
backends = config['backend']
for item in backends.items():
BaseConfig.single_instance().config_backend(
item,
management_ips=config.get('management_ips', []),
data_ips=config.get('data_ips', []),
user_name=config.get('user_name', []),
user_pwd=config.get('user_pwd', []),
cinder_host_ip=cinder_host_ip)
# config multipath
config_computer()
# enable config
execute("systemctl restart openstack-cinder-volume.service")
def config_nova_with_rbd(config):
# config xml and cinder.conf
for config in config['disk_array']:
# load disk array global config
backends = config['backend']
for key, value in backends.items():
if value.get('volume_driver') == 'CEPH':
uuid_instance = uuid.uuid3(uuid.NAMESPACE_DNS, "zte.com.cn")
uuid_str = uuid_instance.urn.split(":")[2]
_set_config_file(CEPHBackendConfig.NOVA_CONF_FILE,
'libvirt',
'images_type',
'rbd')
_set_config_file(CEPHBackendConfig.NOVA_CONF_FILE,
'libvirt',
'rbd_secret_uuid',
uuid_str)
return
# enable config
execute("systemctl restart openstack-nova-compute.service")
def config_glance_with_rbd(config):
# config xml and cinder.conf
for config in config['disk_array']:
# load disk array global config
backends = config['backend']
for key, value in backends.items():
if value.get('volume_driver') == 'CEPH':
_set_config_file(CEPHBackendConfig.GLANCE_API_CONF_FILE,
'DEFAULT',
'show_image_direct_url',
'True')
_set_config_file(CEPHBackendConfig.GLANCE_API_CONF_FILE,
'glance_store',
'default_store',
'rbd')
return
# enable config
execute("systemctl restart openstack-glance-api.service")
def _launch_script():
def subcommand_launcher(args, valid_args_len, json_path, oper_type):
if len(args) < valid_args_len:
print_or_raise("Too few parameter is given,please check.",
ScriptInnerError)
with open(json_path, "r") as fp_json:
params = json.load(fp_json)
print_or_raise("-----Begin config %s, params is %s.-----" %
(oper_type, params))
return params
oper_type = sys.argv[1] if len(sys.argv) > 1 else ""
try:
if oper_type == "share_disk":
share_disk_config = \
subcommand_launcher(sys.argv, 3, "base/control.json",
oper_type)
config_share_disk(share_disk_config, sys.argv[2])
elif oper_type == "cinder_conf":
cinder_backend_config = subcommand_launcher(sys.argv, 3,
"base/cinder.json",
oper_type)
config_cinder(cinder_backend_config, sys.argv[2])
elif oper_type == "nova_rbd_conf":
nova_rbd_config = subcommand_launcher(sys.argv, 1,
"base/cinder.json",
oper_type)
config_nova_with_rbd(nova_rbd_config)
elif oper_type == "glance_rbd_conf":
glance_rbd_config = subcommand_launcher(sys.argv, 1,
"base/cinder.json",
oper_type)
config_glance_with_rbd(glance_rbd_config)
elif oper_type == "check_multipath":
print_or_raise("-----Begin config %s.-----")
config_computer()
elif oper_type == "debug":
pass
else:
print_or_raise("Script operation is not given,such as:share_disk,"
"cinder_conf,nova_rbd_conf,glance_rbd_conf,"
"check_multipath.", ScriptInnerError)
except Exception as e:
print_or_raise("----------Operation %s is Failed.----------\n"
"Exception call chain as follow,%s" %
(oper_type, traceback.format_exc()))
raise e
else:
print_or_raise("----------Operation %s is done!----------" %
oper_type)
if __name__ == "__main__":
_launch_script()

1447
backend/tecs/tecs.conf Executable file

File diff suppressed because it is too large.

9
backend/tecs/tfg_upgrade.sh Executable file

@@ -0,0 +1,9 @@
#!/bin/bash
scriptsdir=$(cd $(dirname $0) && pwd)
ISODIR=`mktemp -d /mnt/TFG_ISOXXXXXX`
mount -o loop $scriptsdir/*CGSL_VPLAT*.iso ${ISODIR}
cp ${ISODIR}/*CGSL_VPLAT*.bin $scriptsdir
umount ${ISODIR}
[ -e ${ISODIR} ] && rm -rf ${ISODIR}
$scriptsdir/*CGSL_VPLAT*.bin upgrade reboot

93
backend/tecs/trustme.sh Executable file

@@ -0,0 +1,93 @@
#!/bin/sh
# Make a remote host fully trust this one, so that later ssh logins there need no password
# Check that the arguments are valid
logfile=/var/log/trustme.log
function print_log
{
local promt="$1"
echo -e "$promt"
echo -e "`date -d today +"%Y-%m-%d %H:%M:%S"` $promt" >> $logfile
}
ip=$1
if [ -z $ip ]; then
print_log "Usage: `basename $0` ipaddr passwd"
exit 1
fi
passwd=$2
if [ -z $passwd ]; then
print_log "Usage: `basename $0` ipaddr passwd"
exit 1
fi
rpm -qi sshpass >/dev/null
if [ $? != 0 ]; then
print_log "Please install sshpass first"
exit 1
fi
# Check whether the peer can be reached with ping
unreachable=`ping $ip -c 1 -W 3 | grep -c "100% packet loss"`
if [ $unreachable -eq 1 ]; then
print_log "host $ip is unreachable"
exit 1
fi
# Generate an ssh public key on this host if there is none yet
if [ ! -e ~/.ssh/id_dsa.pub ]; then
print_log "generating ssh public key ..."
ssh-keygen -t dsa -f /root/.ssh/id_dsa -N ""
if [ $? != 0 ]; then
print_log "ssh-keygen failed"
exit 1
fi
fi
# First remove the previously trusted public key on the peer
user=`whoami`
host=`hostname`
keyend="$user@$host"
print_log "my keyend = $keyend"
cmd="sed '/$keyend$/d' -i ~/.ssh/authorized_keys"
#echo cmd:$cmd
print_log "clear my old pub key on $ip ..."
sshpass -p $passwd ssh -o StrictHostKeyChecking=no $ip "rm -rf /root/.ssh/known_hosts"
if [ $? != 0 ]; then
print_log "ssh $ip to delete known_hosts failed"
exit 1
fi
sshpass -p $passwd ssh -o StrictHostKeyChecking=no $ip "touch ~/.ssh/authorized_keys"
if [ $? != 0 ]; then
print_log "ssh $ip to create file authorized_keys failed"
exit 1
fi
sshpass -p $passwd ssh -o StrictHostKeyChecking=no $ip "$cmd"
if [ $? != 0 ]; then
print_log "ssh $ip to edit authorized_keys failed"
exit 1
fi
# Copy the newly generated public key over
print_log "copy my public key to $ip ..."
tmpfile=/tmp/`hostname`.key.pub
sshpass -p $passwd scp -o StrictHostKeyChecking=no ~/.ssh/id_dsa.pub $ip:$tmpfile
if [ $? != 0 ]; then
print_log "scp file to $ip failed"
exit 1
fi
# Append it to authorized_keys on the peer
print_log "on $ip, append my public key to ~/.ssh/authorized_keys ..."
sshpass -p $passwd ssh -o StrictHostKeyChecking=no $ip "cat $tmpfile >> ~/.ssh/authorized_keys"
if [ $? != 0 ]; then
print_log "ssh $ip to add public key for authorized_keys failed"
exit 1
fi
print_log "rm tmp file $ip:$tmpfile"
sshpass -p $passwd ssh -o StrictHostKeyChecking=no $ip "rm $tmpfile"
if [ $? != 0 ]; then
print_log "ssh $ip to delete tmp file failed"
exit 1
fi
print_log "trustme ok!"

62
backend/zenic/trustme.sh Executable file

@@ -0,0 +1,62 @@
#!/bin/sh
# Make a remote host fully trust this one, so that later ssh logins there need no password
# Check that the arguments are valid
ip=$1
if [ -z $ip ]; then
echo "Usage: `basename $0` ipaddr passwd" >&2
exit 1
fi
passwd=$2
if [ -z $passwd ]; then
echo "Usage: `basename $0` ipaddr passwd" >&2
exit 1
fi
rpm -qi sshpass >/dev/null
if [ $? != 0 ]; then
echo "Please install sshpass first!" >&2
exit 1
fi
# Check whether the peer can be reached with ping
unreachable=`ping $ip -c 1 -W 3 | grep -c "100% packet loss"`
if [ $unreachable -eq 1 ]; then
echo "host $ip is unreachable!!!"
exit 1
fi
# Generate an ssh public key on this host if there is none yet
if [ ! -e ~/.ssh/id_dsa.pub ]; then
echo "generating ssh public key ..."
ssh-keygen -t dsa -f /root/.ssh/id_dsa -N ""
fi
# First remove the previously trusted public key on the peer
user=`whoami`
host=`hostname`
keyend="$user@$host"
echo "my keyend = $keyend"
cmd="sed '/$keyend$/d' -i ~/.ssh/authorized_keys"
#echo cmd:$cmd
echo "clear my old pub key on $ip ..."
sshpass -p $passwd ssh $ip "rm -rf /root/.ssh/known_hosts"
sshpass -p $passwd ssh $ip "touch ~/.ssh/authorized_keys"
sshpass -p $passwd ssh $ip "$cmd"
# Copy the newly generated public key over
echo "copy my public key to $ip ..."
tmpfile=/tmp/`hostname`.key.pub
sshpass -p $passwd scp ~/.ssh/id_dsa.pub $ip:$tmpfile
# Append it to authorized_keys on the peer
echo "on $ip, append my public key to ~/.ssh/authorized_keys ..."
sshpass -p $passwd ssh $ip "cat $tmpfile >> ~/.ssh/authorized_keys"
echo "rm tmp file $ip:$tmpfile"
sshpass -p $passwd ssh $ip "rm $tmpfile"
echo "trustme ok!"

17
backend/zenic/zenic.conf Executable file

@@ -0,0 +1,17 @@
[general]
nodeip=192.168.3.1
nodeid=1
hostname=sdn59
needzamp=y
zbpips=192.168.3.1
zbp_node_num=1
zbpnodelist=1,256
zampips=192.168.3.1
zamp_node_num=1
mongodbips=192.168.3.1
mongodb_node_num=1
zamp_vip=
mongodb_vip=
MacName=eth1
netid=1234
memmode=tiny

348
code/daisy/AUTHORS Executable file

@@ -0,0 +1,348 @@
Aaron Rosen <aaronorosen@gmail.com>
Abhijeet Malawade <Abhijeet.Malawade@nttdata.com>
Abhishek Kekane <abhishek.kekane@nttdata.com>
Adam Gandelman <adam.gandelman@canonical.com>
Adam Gandelman <adamg@ubuntu.com>
Alberto Planas <aplanas@gmail.com>
Alessandro Pilotti <ap@pilotti.it>
Alessio Ababilov <aababilo@yahoo-inc.com>
Alessio Ababilov <aababilov@griddynamics.com>
Alex Gaynor <alex.gaynor@gmail.com>
Alex Meade <mr.alex.meade@gmail.com>
Alexander Gordeev <agordeev@mirantis.com>
Alexander Tivelkov <ativelkov@mirantis.com>
Amala Basha <princessbasha@gmail.com>
AmalaBasha <amala.alungal@RACKSPACE.COM>
AmalaBasha <princessbasha@gmail.com>
Anastasia Vlaskina <alatynskaya@mirantis.com>
Andreas Jaeger <aj@suse.de>
Andrew Hutchings <andrew@linuxjedi.co.uk>
Andrew Melton <andrew.melton@rackspace.com>
Andrew Tranquada <andrew.tranquada@rackspace.com>
Andrey Brindeyev <abrindeyev@griddynamics.com>
Andy McCrae <andy.mccrae@gmail.com>
Anita Kuno <akuno@lavabit.com>
Arnaud Legendre <arnaudleg@gmail.com>
Artur Svechnikov <asvechnikov@mirantis.com>
Ashish Jain <ashish.jain14@wipro.com>
Ashwini Shukla <ashwini.shukla@rackspace.com>
Aswad Rangnekar <aswad.rangnekar@nttdata.com>
Attila Fazekas <afazekas@redhat.com>
Avinash Prasad <avinash.prasad@nttdata.com>
Balazs Gibizer <balazs.gibizer@ericsson.com>
Bartosz Fic <bartosz.fic@intel.com>
Ben Nemec <bnemec@us.ibm.com>
Ben Roble <ben.roble@rackspace.com>
Bernhard M. Wiedemann <bwiedemann@suse.de>
Bhuvan Arumugam <bhuvan@apache.org>
Boris Pavlovic <boris@pavlovic.me>
Brant Knudson <bknudson@us.ibm.com>
Brian Cline <bcline@softlayer.com>
Brian D. Elliott <bdelliott@gmail.com>
Brian Elliott <bdelliott@gmail.com>
Brian Elliott <brian.elliott@rackspace.com>
Brian Lamar <brian.lamar@rackspace.com>
Brian Rosmaita <brian.rosmaita@rackspace.com>
Brian Waldon <brian.waldon@rackspace.com>
Cerberus <matt.dietz@rackspace.com>
Chang Bo Guo <guochbo@cn.ibm.com>
ChangBo Guo(gcb) <eric.guo@easystack.cn>
Chmouel Boudjnah <chmouel@chmouel.com>
Chris Allnutt <chris.allnutt@rackspace.com>
Chris Behrens <cbehrens@codestud.com>
Chris Buccella <buccella@linux.vnet.ibm.com>
Chris Buccella <chris.buccella@antallagon.com>
Chris Fattarsi <chris.fattarsi@pistoncloud.com>
Christian Berendt <berendt@b1-systems.de>
Christopher MacGown <chris@pistoncloud.com>
Chuck Short <chuck.short@canonical.com>
Cindy Pallares <cindy.pallaresq@gmail.com>
Clark Boylan <clark.boylan@gmail.com>
Cory Wright <corywright@gmail.com>
Dan Prince <dprince@redhat.com>
Danny Al-Gaaf <danny.al-gaaf@bisect.de>
Davanum Srinivas <davanum@gmail.com>
Davanum Srinivas <dims@linux.vnet.ibm.com>
Dave Chen <wei.d.chen@intel.com>
Dave Walker (Daviey) <email@daviey.com>
David Koo <david.koo@huawei.com>
David Peraza <dperaza@linux.vnet.ibm.com>
David Ripton <dripton@redhat.com>
Dean Troyer <dtroyer@gmail.com>
DennyZhang <denny@unitedstack.com>
Derek Higgins <derekh@redhat.com>
Dirk Mueller <dirk@dmllr.de>
Dmitry Kulishenko <dmitryk@yahoo-inc.com>
Dolph Mathews <dolph.mathews@gmail.com>
Donal Lafferty <donal.lafferty@citrix.com>
Doron Chen <cdoron@il.ibm.com>
Doug Hellmann <doug.hellmann@dreamhost.com>
Doug Hellmann <doug@doughellmann.com>
Duncan McGreggor <duncan@dreamhost.com>
Eddie Sheffield <eddie.sheffield@rackspace.com>
Edward Hope-Morley <edward.hope-morley@canonical.com>
Eldar Nugaev <enugaev@griddynamics.com>
Elena Ezhova <eezhova@mirantis.com>
Eoghan Glynn <eglynn@redhat.com>
Eric Brown <browne@vmware.com>
Eric Windisch <eric@cloudscaling.com>
Erno Kuvaja <jokke@hp.com>
Eugeniya Kudryashova <ekudryashova@mirantis.com>
Ewan Mellor <ewan.mellor@citrix.com>
Fabio M. Di Nitto <fdinitto@redhat.com>
Fei Long Wang <flwang@catalyst.net.nz>
Fei Long Wang <flwang@cn.ibm.com>
Fengqian Gao <fengqian.gao@intel.com>
Flaper Fesp <flaper87@gmail.com>
Flavio Percoco <flaper87@gmail.com>
Florent Flament <florent.flament-ext@cloudwatt.com>
Gabriel Hurley <gabriel@strikeawe.com>
Gauvain Pocentek <gauvain.pocentek@objectif-libre.com>
Geetika Batra <geetika791@gmail.com>
George Peristerakis <george.peristerakis@enovance.com>
Georgy Okrokvertskhov <gokrokvertskhov@mirantis.com>
Gerardo Porras <gporras@yahoo-inc.com>
Gorka Eguileor <geguileo@redhat.com>
Grant Murphy <grant.murphy@hp.com>
Haiwei Xu <xu-haiwei@mxw.nes.nec.co.jp>
He Yongli <yongli.he@intel.com>
Hemanth Makkapati <hemanth.makkapati@mailtrust.com>
Hemanth Makkapati <hemanth.makkapati@rackspace.com>
Hengqing Hu <hudayou@hotmail.com>
Hirofumi Ichihara <ichihara.hirofumi@lab.ntt.co.jp>
Hui Xiang <hui.xiang@canonical.com>
Ian Cordasco <ian.cordasco@rackspace.com>
Iccha Sethi <iccha.sethi@rackspace.com>
Igor A. Lukyanenkov <ilukyanenkov@griddynamics.com>
Ihar Hrachyshka <ihrachys@redhat.com>
Ildiko Vancsa <ildiko.vancsa@ericsson.com>
Ilya Pekelny <ipekelny@mirantis.com>
Inessa Vasilevskaya <ivasilevskaya@mirantis.com>
Ionuț Arțăriși <iartarisi@suse.cz>
Isaku Yamahata <yamahata@valinux.co.jp>
J. Daniel Schmidt <jdsn@suse.de>
Jakub Ruzicka <jruzicka@redhat.com>
James Carey <jecarey@us.ibm.com>
James E. Blair <jeblair@hp.com>
James Li <yueli.m@gmail.com>
James Morgan <james.morgan@rackspace.com>
James Polley <jp@jamezpolley.com>
Jamie Lennox <jamielennox@redhat.com>
Jared Culp <jared.culp@rackspace.com>
Jasakov Artem <ayasakov@mirantis.com>
Jason Koelker <jason@koelker.net>
Jason Kölker <jason@koelker.net>
Jay Pipes <jaypipes@gmail.com>
Jeremy Stanley <fungi@yuggoth.org>
Jesse Andrews <anotherjesse@gmail.com>
Jesse J. Cook <jesse.cook@rackspace.com>
Jia Dong <jiadong.jia@huawei.com>
Jinwoo 'Joseph' Suh <jsuh@isi.edu>
Joe Gordon <joe.gordon0@gmail.com>
Joe Gordon <jogo@cloudscaling.com>
Johannes Erdfelt <johannes.erdfelt@rackspace.com>
John Bresnahan <jbresnah@redhat.com>
John Lenihan <john.lenihan@hp.com>
John Warren <jswarren@us.ibm.com>
Jon Bernard <jobernar@redhat.com>
Joseph Suh <jsuh@isi.edu>
Josh Durgin <josh.durgin@dreamhost.com>
Josh Durgin <josh.durgin@inktank.com>
Josh Kearney <josh@jk0.org>
Joshua Harlow <harlowja@yahoo-inc.com>
Juan Manuel Olle <juan.m.olle@intel.com>
Juerg Haefliger <juerg.haefliger@hp.com>
Julia Varlamova <jvarlamova@mirantis.com>
Julien Danjou <julien@danjou.info>
Jun Hong Li <junhongl@cn.ibm.com>
Justin Santa Barbara <justin@fathomdb.com>
Justin Shepherd <jshepher@rackspace.com>
KIYOHIRO ADACHI <adachi@mxs.nes.nec.co.jp>
Kamil Rykowski <kamil.rykowski@intel.com>
Kasey Alusi <kasey.alusi@gmail.com>
Ken Pepple <ken.pepple@gmail.com>
Ken Thomas <krt@yahoo-inc.com>
Kent Wang <kent.wang@intel.com>
Keshava Bharadwaj <kb.sankethi@gmail.com>
Kevin L. Mitchell <kevin.mitchell@rackspace.com>
Kui Shi <skuicloud@gmail.com>
Kun Huang <gareth@unitedstack.com>
Lakshmi N Sampath <lakshmi.sampath@hp.com>
Lars Gellrich <lars.gellrich@hp.com>
Leam <leam.hall@mailtrust.com>
Leandro I. Costantino <leandro.i.costantino@intel.com>
Liu Yuan <namei.unix@gmail.com>
Lorin Hochstein <lorin@nimbisservices.com>
Louis Taylor <kragniz@gmail.com>
Louis Taylor <louis@kragniz.eu>
Luis A. Garcia <luis@linux.vnet.ibm.com>
Major Hayden <major@mhtx.net>
Mark J. Washenberger <mark.washenberger@markwash.net>
Mark J. Washenberger <mark.washenberger@rackspace.com>
Mark McLoughlin <markmc@redhat.com>
Mark Washenberger <mark.washenberger@rackspace.com>
Martin Kletzander <mkletzan@redhat.com>
Maru Newby <mnewby@internap.com>
Masashi Ozawa <mozawa@cloudian.com>
Matt Dietz <matt.dietz@rackspace.com>
Matt Fischer <matt@mattfischer.com>
Matt Riedemann <mriedem@us.ibm.com>
Matthew Booth <mbooth@redhat.com>
Matthew Treinish <treinish@linux.vnet.ibm.com>
Matthias Schmitz <matthias@sigxcpu.org>
Maurice Leeflang <maurice@leeflang.net>
Mauro S. M. Rodrigues <maurosr@linux.vnet.ibm.com>
Michael J Fork <mjfork@us.ibm.com>
Michael Still <mikal@stillhq.com>
Michal Dulko <michal.dulko@intel.com>
Mike Fedosin <mfedosin@mirantis.com>
Mike Lundy <mike@pistoncloud.com>
Monty Taylor <mordred@inaugust.com>
Nassim Babaci <nassim.babaci@cloudwatt.com>
Nicholas Kuechler <nkuechler@gmail.com>
Nicolas Simonds <nic@metacloud.com>
Nikhil Komawar <nikhil.komawar@rackspace.com>
Nikhil Komawar <nikhilskomawar@gmail.com>
Nikolaj Starodubtsev <nstarodubtsev@mirantis.com>
Noboru Arai <arai@mxa.nes.nec.co.jp>
Noboru arai <arai@mxa.nes.nec.co.jp>
Oleksii Chuprykov <ochuprykov@mirantis.com>
Olena Logvinova <ologvinova@mirantis.com>
Pamela-Rose Virtucio <virtu006@umn.edu>
Patrick Mezard <patrick@mezard.eu>
Paul Bourke <paul-david.bourke@hp.com>
Paul Bourke <pauldbourke@gmail.com>
Paul McMillan <paul.mcmillan@nebula.com>
Pavan Kumar Sunkara <pavan.sss1991@gmail.com>
Pawel Koniszewski <pawel.koniszewski@intel.com>
Pawel Skowron <pawel.skowron@intel.com>
Peng Yong <ppyy@pubyun.com>
Pete Zaitcev <zaitcev@kotori.zaitcev.us>
Pranali Deore <pranali11.deore@nttdata.com>
PranaliDeore <pranali11.deore@nttdata.com>
Pádraig Brady <P@draigBrady.com>
Pádraig Brady <pbrady@redhat.com>
Radu <mateescu@ca.ibm.com>
Rainya Mosher <rainya.mosher@rackspace.com>
Rajesh Tailor <rajesh.tailor@nttdata.com>
Ray Chen <oldsharp@163.com>
Reynolds Chin <benzwt@gmail.com>
Rick Clark <rick@openstack.org>
Rick Harris <rconradharris@gmail.com>
Robert Collins <rbtcollins@hp.com>
Rohan Kanade <rkanade@redhat.com>
Roman Bogorodskiy <bogorodskiy@gmail.com>
Roman Bogorodskiy <rbogorodskiy@mirantis.com>
Roman Vasilets <rvasilets@mirantis.com>
Rongze Zhu <zrzhit@gmail.com>
RongzeZhu <zrzhit@gmail.com>
Russell Bryant <rbryant@redhat.com>
Russell Sim <russell.sim@gmail.com>
Sabari Kumar Murugesan <smurugesan@vmware.com>
Sam Morrison <sorrison@gmail.com>
Sam Stavinoha <smlstvnh@gmail.com>
Samuel Merritt <sam@swiftstack.com>
Sascha Peilicke <saschpe@gmx.de>
Sascha Peilicke <saschpe@suse.de>
Sathish Nagappan <sathish.nagappan@nebula.com>
Sean Dague <sdague@linux.vnet.ibm.com>
Sean Dague <sean@dague.net>
Sergey Nikitin <snikitin@mirantis.com>
Sergey Skripnick <sskripnick@mirantis.com>
Sergey Vilgelm <svilgelm@mirantis.com>
Sergio Cazzolato <sergio.j.cazzolato@intel.com>
Shane Wang <shane.wang@intel.com>
Soren Hansen <soren.hansen@rackspace.com>
Stan Lagun <slagun@mirantis.com>
Steve Kowalik <steven@wedontsleep.org>
Steve Lewis <stevelle@gmail.com>
Stuart McLaren <stuart.mclaren@hp.com>
Sulochan Acharya <sulochan.acharya@rackspace.co.uk>
Svetlana Shturm <sshturm@mirantis.com>
Taku Fukushima <tfukushima@dcl.info.waseda.ac.jp>
Tatyana Leontovich <tleontov@yahoo-inc.com>
Therese McHale <therese.mchale@hp.com>
Thierry Carrez <thierry@openstack.org>
Thomas Bechtold <tbechtold@suse.com>
Thomas Bechtold <thomasbechtold@jpberlin.de>
Thomas Leaman <thomas.leaman@hp.com>
Tim Daly, Jr <timjr@yahoo-inc.com>
Toan Nguyen <toan.nguyen@rackspace.com>
Tom Hancock <tom.hancock@hp.com>
Tom Leaman <thomas.leaman@hp.com>
Tomas Hancock <tom.hancock@hp.com>
Travis Tripp <travis.tripp@hp.com>
Unmesh Gurjar <unmesh.gurjar@nttdata.com>
Unmesh Gurjar <unmesh.gurjar@vertex.co.in>
Vaibhav Bhatkar <vaibhav.bhatkar@gmail.com>
Venkatesh Sampath <venkatesh.sampath@outlook.com>
Venkatesh Sampath <venkatesh.sampath@rackspace.com>
Victor Morales <victor.morales@intel.com>
Victor Sergeyev <vsergeyev@mirantis.com>
Vincent Untz <vuntz@suse.com>
Vishvananda Ishaya <vishvananda@gmail.com>
Vitaliy Kolosov <vkolosov@griddynamics.com>
Vyacheslav Vakhlyuev <vvakhlyuev@mirantis.com>
Wayne A. Walls <wayne.walls@rackspace.com>
Wayne Okuma <wayne.okuma@hp.com>
Wen Cheng Ma <wenchma@cn.ibm.com>
Wu Wenxiang <wu.wenxiang@99cloud.net>
YAMAMOTO Takashi <yamamoto@valinux.co.jp>
Yaguang Tang <heut2008@gmail.com>
Yanis Guenane <yanis.guenane@enovance.com>
Yufang Zhang <yufang521247@gmail.com>
Yuriy Taraday <yorik.sar@gmail.com>
Yusuke Ide <idzzyzzbi@gmail.com>
ZHANG Hua <zhuadl@cn.ibm.com>
Zhenguo Niu <zhenguo@unitedstack.com>
Zhi Yan Liu <zhiyanl@cn.ibm.com>
ZhiQiang Fan <aji.zqfan@gmail.com>
ZhiQiang Fan <zhiqiang.fan@huawei.com>
Zhiteng Huang <zhiteng.huang@intel.com>
Zhongyue Luo <zhongyue.nah@intel.com>
abhishek-kekane <abhishek.kekane@nttdata.com>
abhishekkekane <abhishek.kekane@nttdata.com>
amalaba <princessbasha@gmail.com>
ankitagrawal <ankit11.agrawal@nttdata.com>
ankur <ankur.gupta@nectechnologies.in>
annegentle <anne@openstack.org>
daisy-ycguo <daisy.ycguo@gmail.com>
eddie-sheffield <eddie.sheffield@rackspace.com>
eos2102 <eos2102@gmail.com>
gengjh <gengjh@cn.ibm.com>
henriquetruta <henrique@lsd.ufcg.edu.br>
huangtianhua <huangtianhua@huawei.com>
hzrandd <82433422@qq.com>
iccha <iccha.sethi@rackspace.com>
iccha-sethi <iccha.sethi@rackspace.com>
iccha.sethi <iccha.sethi@rackspace.com>
isethi <iccha.sethi@rackspace.com>
jakedahn <jake@ansolabs.com>
jare6412 <jared.culp@mailtrust.com>
jaypipes@gmail.com <>
jola-mirecka <jola.mirecka@hp.com>
lawrancejing <lawrancejing@gmail.com>
leseb <sebastien.han@enovance.com>
ling-yun <zengyunling@huawei.com>
liuqing <jing.liuqing@99cloud.net>
liyingjun <liyingjun1988@gmail.com>
liyingjun <yingjun.li@kylin-cloud.com>
lizheming <lizheming.li@huawei.com>
llg8212 <lilinguo@huawei.com>
ls1175 <liusheng@huawei.com>
marianitadn <maria.nita.dn@gmail.com>
mathrock <nathanael.i.burton.work@gmail.com>
nanhai liao <nanhai.liao@kylin-cloud.com>
pran1990 <praneshpg@gmail.com>
ravikumar-venkatesan <ravikumar.venkatesan@hp.com>
sai krishna sripada <krishna1256@gmail.com>
sarvesh-ranjan <saranjan@cisco.com>
shreeduth-awasthi <shreeduth.awasthi@tcs.com>
shrutiranade38 <shrutiranade38@gmail.com>
shu,xinxin <xinxin.shu@intel.com>
sridevik <koushiksridevi8@gmail.com>
sridevik <sridevi.koushik@rackspace.com>
tanlin <lin.tan@intel.com>
tmcpeak <travis_mcpeak@symantec.com>
wanghong <w.wanghong@huawei.com>
yangxurong <yangxurong@huawei.com>

16
code/daisy/CONTRIBUTING.rst Executable file

@ -0,0 +1,16 @@
If you would like to contribute to the development of OpenStack,
you must follow the steps documented at:
http://docs.openstack.org/infra/manual/developers.html#development-workflow
Once those steps have been completed, changes to OpenStack
should be submitted for review via the Gerrit tool, following
the workflow documented at:
http://docs.openstack.org/infra/manual/developers.html#development-workflow
Pull requests submitted through GitHub will be ignored.
Bugs should be filed on Launchpad, not GitHub:
https://bugs.launchpad.net/glance

2869
code/daisy/ChangeLog Executable file

File diff suppressed because it is too large

25
code/daisy/HACKING.rst Executable file

@ -0,0 +1,25 @@
glance Style Commandments
=========================
- Step 1: Read the OpenStack Style Commandments
http://docs.openstack.org/developer/hacking/
- Step 2: Read on
glance Specific Commandments
----------------------------
- [G316] Replace assertTrue(isinstance(A, B)) with the more specific assert
assertIsInstance(A, B)
- [G317] Replace assertEqual(type(A), B) with the more specific assert
assertIsInstance(A, B)
- [G318] Replace assertEqual(A, None) or assertEqual(None, A) with the more
specific assert assertIsNone(A)
- [G319] Validate that debug level logs are not translated
- [G320] For python 3 compatibility, use six.text_type() instead of unicode()
- [G321] Validate that LOG messages, except debug ones, have translations
- [G322] Validate that LOG.info messages use _LI.
- [G323] Validate that LOG.exception messages use _LE.
- [G324] Validate that LOG.error messages use _LE.
- [G325] Validate that LOG.critical messages use _LC.
- [G326] Validate that LOG.warning messages use _LW.
- [G327] Prevent use of deprecated contextlib.nested
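A minimal, hypothetical sketch (not part of the commit) of the assertion styles the checks above enforce, assuming Python 2.7 with six as pinned in the requirements:

# Hypothetical snippet illustrating G316-G318 and G320; not from the commit.
import unittest

import six


class TestStyleExamples(unittest.TestCase):
    def test_preferred_asserts(self):
        value = six.text_type('image')    # G320: six.text_type, not unicode()
        self.assertIsInstance(value, six.text_type)  # G316/G317 preferred form
        self.assertIsNone(None)                      # G318 preferred form


if __name__ == '__main__':
    unittest.main()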

176
code/daisy/LICENSE Executable file

@ -0,0 +1,176 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

20
code/daisy/MANIFEST.in Executable file

@ -0,0 +1,20 @@
include run_tests.sh ChangeLog
include README.rst builddeb.sh
include MANIFEST.in pylintrc
include AUTHORS
include run_tests.py
include HACKING.rst
include LICENSE
include ChangeLog
include babel.cfg tox.ini
include openstack-common.conf
include glance/openstack/common/README
include glance/db/sqlalchemy/migrate_repo/README
include glance/db/sqlalchemy/migrate_repo/migrate.cfg
include glance/db/sqlalchemy/migrate_repo/versions/*.sql
graft doc
graft etc
graft glance/locale
graft glance/tests
graft tools
global-exclude *.pyc

30
code/daisy/PKG-INFO Executable file

@ -0,0 +1,30 @@
Metadata-Version: 1.1
Name: glance
Version: 2015.1.0
Summary: OpenStack Image Service
Home-page: http://www.openstack.org/
Author: OpenStack
Author-email: openstack-dev@lists.openstack.org
License: UNKNOWN
Description: ======
Glance
======
Glance is a project that defines services for discovering, registering,
retrieving and storing virtual machine images.
Use the following resources to learn more:
* `Official Glance documentation <http://docs.openstack.org/developer/glance/>`_
* `Official Client documentation <http://docs.openstack.org/developer/python-glanceclient/>`_
Platform: UNKNOWN
Classifier: Environment :: OpenStack
Classifier: Intended Audience :: Information Technology
Classifier: Intended Audience :: System Administrators
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: POSIX :: Linux
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7

11
code/daisy/README.rst Executable file

@ -0,0 +1,11 @@
======
Glance
======
Glance is a project that defines services for discovering, registering,
retrieving and storing virtual machine images.
Use the following resources to learn more:
* `Official Glance documentation <http://docs.openstack.org/developer/glance/>`_
* `Official Client documentation <http://docs.openstack.org/developer/python-glanceclient/>`_

1
code/daisy/babel.cfg Executable file

@ -0,0 +1 @@
[python: **.py]

30
code/daisy/daisy.egg-info/PKG-INFO Executable file

@ -0,0 +1,30 @@
Metadata-Version: 1.1
Name: daisy
Version: 2015.1.0
Summary: OpenStack Image Service
Home-page: http://www.openstack.org/
Author: OpenStack
Author-email: openstack-dev@lists.openstack.org
License: UNKNOWN
Description: ======
Daisy
======
Daisy is a project that defines services for discovering, registering,
retrieving and storing virtual machine images.
Use the following resources to learn more:
* `Official Daisy documentation <http://docs.openstack.org/developer/daisy/>`_
* `Official Client documentation <http://docs.openstack.org/developer/python-daisyclient/>`_
Platform: UNKNOWN
Classifier: Environment :: OpenStack
Classifier: Intended Audience :: Information Technology
Classifier: Intended Audience :: System Administrators
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: POSIX :: Linux
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7

525
code/daisy/daisy.egg-info/SOURCES.txt Executable file

@ -0,0 +1,525 @@
.coveragerc
.mailmap
.testr.conf
AUTHORS
CONTRIBUTING.rst
ChangeLog
HACKING.rst
LICENSE
MANIFEST.in
README.rst
babel.cfg
openstack-common.conf
pylintrc
requirements.txt
run_tests.sh
setup.cfg
setup.py
test-requirements.txt
tox.ini
doc/source/architecture.rst
doc/source/authentication.rst
doc/source/cache.rst
doc/source/common-image-properties.rst
doc/source/conf.py
doc/source/configuring.rst
doc/source/controllingservers.rst
doc/source/db.rst
doc/source/formats.rst
doc/source/daisyapi.rst
doc/source/daisyclient.rst
doc/source/daisymetadefcatalogapi.rst
doc/source/identifiers.rst
doc/source/index.rst
doc/source/installing.rst
doc/source/metadefs-concepts.rst
doc/source/notifications.rst
doc/source/policies.rst
doc/source/property-protections.rst
doc/source/statuses.rst
doc/source/images/architecture.png
doc/source/images/image_status_transition.png
doc/source/images_src/architecture.graphml
doc/source/images_src/image_status_transition.dot
doc/source/images_src/image_status_transition.png
doc/source/man/footer.rst
doc/source/man/general_options.rst
doc/source/man/daisyapi.rst
doc/source/man/daisycachecleaner.rst
doc/source/man/daisycachemanage.rst
doc/source/man/daisycacheprefetcher.rst
doc/source/man/daisycachepruner.rst
doc/source/man/daisycontrol.rst
doc/source/man/daisymanage.rst
doc/source/man/daisyregistry.rst
doc/source/man/daisyreplicator.rst
doc/source/man/daisyscrubber.rst
doc/source/man/openstack_options.rst
etc/daisy-api-paste.ini
etc/daisy-api.conf
etc/daisy-cache.conf
etc/daisy-manage.conf
etc/daisy-registry-paste.ini
etc/daisy-registry.conf
etc/daisy-scrubber.conf
etc/daisy-search-paste.ini
etc/daisy-search.conf
etc/daisy-swift.conf.sample
etc/policy.json
etc/property-protections-policies.conf.sample
etc/property-protections-roles.conf.sample
etc/schema-image.json
etc/search-policy.json
etc/metadefs/README
etc/metadefs/compute-aggr-disk-filter.json
etc/metadefs/compute-aggr-iops-filter.json
etc/metadefs/compute-aggr-num-instances.json
etc/metadefs/compute-guest-shutdown.json
etc/metadefs/compute-host-capabilities.json
etc/metadefs/compute-hypervisor.json
etc/metadefs/compute-instance-data.json
etc/metadefs/compute-libvirt-image.json
etc/metadefs/compute-libvirt.json
etc/metadefs/compute-quota.json
etc/metadefs/compute-randomgen.json
etc/metadefs/compute-trust.json
etc/metadefs/compute-vcputopology.json
etc/metadefs/compute-vmware-flavor.json
etc/metadefs/compute-vmware-quota-flavor.json
etc/metadefs/compute-vmware.json
etc/metadefs/compute-watchdog.json
etc/metadefs/compute-xenapi.json
etc/metadefs/daisy-common-image-props.json
etc/metadefs/operating-system.json
etc/metadefs/software-databases.json
etc/metadefs/software-runtimes.json
etc/metadefs/software-webservers.json
etc/oslo-config-generator/daisy-api.conf
etc/oslo-config-generator/daisy-cache.conf
etc/oslo-config-generator/daisy-manage.conf
etc/oslo-config-generator/daisy-registry.conf
etc/oslo-config-generator/daisy-scrubber.conf
daisy/__init__.py
daisy/context.py
daisy/gateway.py
daisy/i18n.py
daisy/listener.py
daisy/location.py
daisy/notifier.py
daisy/opts.py
daisy/schema.py
daisy/scrubber.py
daisy/service.py
daisy/version.py
daisy.egg-info/PKG-INFO
daisy.egg-info/SOURCES.txt
daisy.egg-info/dependency_links.txt
daisy.egg-info/entry_points.txt
daisy.egg-info/not-zip-safe
daisy.egg-info/pbr.json
daisy.egg-info/requires.txt
daisy.egg-info/top_level.txt
daisy/api/__init__.py
daisy/api/authorization.py
daisy/api/cached_images.py
daisy/api/common.py
daisy/api/policy.py
daisy/api/property_protections.py
daisy/api/versions.py
daisy/api/middleware/__init__.py
daisy/api/middleware/cache.py
daisy/api/middleware/cache_manage.py
daisy/api/middleware/context.py
daisy/api/middleware/gzip.py
daisy/api/middleware/version_negotiation.py
daisy/api/v1/__init__.py
daisy/api/v1/controller.py
daisy/api/v1/filters.py
daisy/api/v1/images.py
daisy/api/v1/members.py
daisy/api/v1/router.py
daisy/api/v1/upload_utils.py
daisy/api/v2/__init__.py
daisy/api/v2/image_actions.py
daisy/api/v2/image_data.py
daisy/api/v2/image_members.py
daisy/api/v2/image_tags.py
daisy/api/v2/images.py
daisy/api/v2/metadef_namespaces.py
daisy/api/v2/metadef_objects.py
daisy/api/v2/metadef_properties.py
daisy/api/v2/metadef_resource_types.py
daisy/api/v2/metadef_tags.py
daisy/api/v2/router.py
daisy/api/v2/schemas.py
daisy/api/v2/tasks.py
daisy/api/v2/model/__init__.py
daisy/api/v2/model/metadef_namespace.py
daisy/api/v2/model/metadef_object.py
daisy/api/v2/model/metadef_property_item_type.py
daisy/api/v2/model/metadef_property_type.py
daisy/api/v2/model/metadef_resource_type.py
daisy/api/v2/model/metadef_tag.py
daisy/artifacts/__init__.py
daisy/async/__init__.py
daisy/async/taskflow_executor.py
daisy/async/utils.py
daisy/async/flows/__init__.py
daisy/async/flows/base_import.py
daisy/async/flows/convert.py
daisy/async/flows/introspect.py
daisy/cmd/__init__.py
daisy/cmd/agent_notification.py
daisy/cmd/api.py
daisy/cmd/cache_cleaner.py
daisy/cmd/cache_manage.py
daisy/cmd/cache_prefetcher.py
daisy/cmd/cache_pruner.py
daisy/cmd/control.py
daisy/cmd/index.py
daisy/cmd/manage.py
daisy/cmd/registry.py
daisy/cmd/replicator.py
daisy/cmd/scrubber.py
daisy/cmd/search.py
daisy/cmd/orchestration.py
daisy/common/__init__.py
daisy/common/auth.py
daisy/common/client.py
daisy/common/config.py
daisy/common/crypt.py
daisy/common/exception.py
daisy/common/jsonpatchvalidator.py
daisy/common/property_utils.py
daisy/common/rpc.py
daisy/common/semver_db.py
daisy/common/store_utils.py
daisy/common/swift_store_utils.py
daisy/common/utils.py
daisy/common/wsgi.py
daisy/common/wsme_utils.py
daisy/common/artifacts/__init__.py
daisy/common/artifacts/declarative.py
daisy/common/artifacts/definitions.py
daisy/common/artifacts/loader.py
daisy/common/artifacts/serialization.py
daisy/common/location_strategy/__init__.py
daisy/common/location_strategy/location_order.py
daisy/common/location_strategy/store_type.py
daisy/common/scripts/__init__.py
daisy/common/scripts/utils.py
daisy/common/scripts/image_import/__init__.py
daisy/common/scripts/image_import/main.py
daisy/contrib/__init__.py
daisy/contrib/plugins/__init__.py
daisy/contrib/plugins/artifacts_sample/__init__.py
daisy/contrib/plugins/artifacts_sample/base.py
daisy/contrib/plugins/artifacts_sample/setup.cfg
daisy/contrib/plugins/artifacts_sample/setup.py
daisy/contrib/plugins/artifacts_sample/v1/__init__.py
daisy/contrib/plugins/artifacts_sample/v1/artifact.py
daisy/contrib/plugins/artifacts_sample/v2/__init__.py
daisy/contrib/plugins/artifacts_sample/v2/artifact.py
daisy/contrib/plugins/image_artifact/__init__.py
daisy/contrib/plugins/image_artifact/requirements.txt
daisy/contrib/plugins/image_artifact/setup.cfg
daisy/contrib/plugins/image_artifact/setup.py
daisy/contrib/plugins/image_artifact/version_selector.py
daisy/contrib/plugins/image_artifact/v1/__init__.py
daisy/contrib/plugins/image_artifact/v1/image.py
daisy/contrib/plugins/image_artifact/v1_1/__init__.py
daisy/contrib/plugins/image_artifact/v1_1/image.py
daisy/contrib/plugins/image_artifact/v2/__init__.py
daisy/contrib/plugins/image_artifact/v2/image.py
daisy/db/__init__.py
daisy/db/metadata.py
daisy/db/migration.py
daisy/db/registry/__init__.py
daisy/db/registry/api.py
daisy/db/simple/__init__.py
daisy/db/simple/api.py
daisy/db/sqlalchemy/__init__.py
daisy/db/sqlalchemy/api.py
daisy/db/sqlalchemy/artifacts.py
daisy/db/sqlalchemy/metadata.py
daisy/db/sqlalchemy/models.py
daisy/db/sqlalchemy/models_artifacts.py
daisy/db/sqlalchemy/models_metadef.py
daisy/db/sqlalchemy/metadef_api/__init__.py
daisy/db/sqlalchemy/metadef_api/namespace.py
daisy/db/sqlalchemy/metadef_api/object.py
daisy/db/sqlalchemy/metadef_api/property.py
daisy/db/sqlalchemy/metadef_api/resource_type.py
daisy/db/sqlalchemy/metadef_api/resource_type_association.py
daisy/db/sqlalchemy/metadef_api/tag.py
daisy/db/sqlalchemy/metadef_api/utils.py
daisy/db/sqlalchemy/migrate_repo/README
daisy/db/sqlalchemy/migrate_repo/__init__.py
daisy/db/sqlalchemy/migrate_repo/manage.py
daisy/db/sqlalchemy/migrate_repo/migrate.cfg
daisy/db/sqlalchemy/migrate_repo/schema.py
daisy/db/sqlalchemy/migrate_repo/versions/001_add_images_table.py
daisy/db/sqlalchemy/migrate_repo/versions/002_add_image_properties_table.py
daisy/db/sqlalchemy/migrate_repo/versions/003_add_disk_format.py
daisy/db/sqlalchemy/migrate_repo/versions/003_sqlite_downgrade.sql
daisy/db/sqlalchemy/migrate_repo/versions/003_sqlite_upgrade.sql
daisy/db/sqlalchemy/migrate_repo/versions/004_add_checksum.py
daisy/db/sqlalchemy/migrate_repo/versions/005_size_big_integer.py
daisy/db/sqlalchemy/migrate_repo/versions/006_key_to_name.py
daisy/db/sqlalchemy/migrate_repo/versions/006_mysql_downgrade.sql
daisy/db/sqlalchemy/migrate_repo/versions/006_mysql_upgrade.sql
daisy/db/sqlalchemy/migrate_repo/versions/006_sqlite_downgrade.sql
daisy/db/sqlalchemy/migrate_repo/versions/006_sqlite_upgrade.sql
daisy/db/sqlalchemy/migrate_repo/versions/007_add_owner.py
daisy/db/sqlalchemy/migrate_repo/versions/008_add_image_members_table.py
daisy/db/sqlalchemy/migrate_repo/versions/009_add_mindisk_and_minram.py
daisy/db/sqlalchemy/migrate_repo/versions/010_default_update_at.py
daisy/db/sqlalchemy/migrate_repo/versions/011_make_mindisk_and_minram_notnull.py
daisy/db/sqlalchemy/migrate_repo/versions/011_sqlite_downgrade.sql
daisy/db/sqlalchemy/migrate_repo/versions/011_sqlite_upgrade.sql
daisy/db/sqlalchemy/migrate_repo/versions/012_id_to_uuid.py
daisy/db/sqlalchemy/migrate_repo/versions/013_add_protected.py
daisy/db/sqlalchemy/migrate_repo/versions/013_sqlite_downgrade.sql
daisy/db/sqlalchemy/migrate_repo/versions/014_add_image_tags_table.py
daisy/db/sqlalchemy/migrate_repo/versions/015_quote_swift_credentials.py
daisy/db/sqlalchemy/migrate_repo/versions/016_add_status_image_member.py
daisy/db/sqlalchemy/migrate_repo/versions/016_sqlite_downgrade.sql
daisy/db/sqlalchemy/migrate_repo/versions/017_quote_encrypted_swift_credentials.py
daisy/db/sqlalchemy/migrate_repo/versions/018_add_image_locations_table.py
daisy/db/sqlalchemy/migrate_repo/versions/019_migrate_image_locations.py
daisy/db/sqlalchemy/migrate_repo/versions/020_drop_images_table_location.py
daisy/db/sqlalchemy/migrate_repo/versions/021_set_engine_mysql_innodb.py
daisy/db/sqlalchemy/migrate_repo/versions/022_image_member_index.py
daisy/db/sqlalchemy/migrate_repo/versions/023_placeholder.py
daisy/db/sqlalchemy/migrate_repo/versions/024_placeholder.py
daisy/db/sqlalchemy/migrate_repo/versions/025_placeholder.py
daisy/db/sqlalchemy/migrate_repo/versions/026_add_location_storage_information.py
daisy/db/sqlalchemy/migrate_repo/versions/027_checksum_index.py
daisy/db/sqlalchemy/migrate_repo/versions/028_owner_index.py
daisy/db/sqlalchemy/migrate_repo/versions/029_location_meta_data_pickle_to_string.py
daisy/db/sqlalchemy/migrate_repo/versions/030_add_tasks_table.py
daisy/db/sqlalchemy/migrate_repo/versions/031_remove_duplicated_locations.py
daisy/db/sqlalchemy/migrate_repo/versions/032_add_task_info_table.py
daisy/db/sqlalchemy/migrate_repo/versions/033_add_location_status.py
daisy/db/sqlalchemy/migrate_repo/versions/034_add_virtual_size.py
daisy/db/sqlalchemy/migrate_repo/versions/035_add_metadef_tables.py
daisy/db/sqlalchemy/migrate_repo/versions/036_rename_metadef_schema_columns.py
daisy/db/sqlalchemy/migrate_repo/versions/037_add_changes_to_satisfy_models.py
daisy/db/sqlalchemy/migrate_repo/versions/037_sqlite_downgrade.sql
daisy/db/sqlalchemy/migrate_repo/versions/037_sqlite_upgrade.sql
daisy/db/sqlalchemy/migrate_repo/versions/038_add_metadef_tags_table.py
daisy/db/sqlalchemy/migrate_repo/versions/039_add_changes_to_satisfy_models_metadef.py
daisy/db/sqlalchemy/migrate_repo/versions/040_add_changes_to_satisfy_metadefs_tags.py
daisy/db/sqlalchemy/migrate_repo/versions/041_add_artifact_tables.py
daisy/db/sqlalchemy/migrate_repo/versions/__init__.py
daisy/domain/__init__.py
daisy/domain/proxy.py
daisy/hacking/__init__.py
daisy/hacking/checks.py
daisy/image_cache/__init__.py
daisy/image_cache/base.py
daisy/image_cache/cleaner.py
daisy/image_cache/client.py
daisy/image_cache/prefetcher.py
daisy/image_cache/pruner.py
daisy/image_cache/drivers/__init__.py
daisy/image_cache/drivers/base.py
daisy/image_cache/drivers/sqlite.py
daisy/image_cache/drivers/xattr.py
daisy/locale/daisy-log-critical.pot
daisy/locale/daisy-log-error.pot
daisy/locale/daisy-log-info.pot
daisy/locale/daisy-log-warning.pot
daisy/locale/daisy.pot
daisy/locale/en_GB/LC_MESSAGES/daisy-log-info.po
daisy/locale/fr/LC_MESSAGES/daisy-log-info.po
daisy/locale/pt_BR/LC_MESSAGES/daisy-log-info.po
daisy/openstack/__init__.py
daisy/openstack/common/README
daisy/openstack/common/__init__.py
daisy/openstack/common/_i18n.py
daisy/openstack/common/eventlet_backdoor.py
daisy/openstack/common/fileutils.py
daisy/openstack/common/local.py
daisy/openstack/common/loopingcall.py
daisy/openstack/common/service.py
daisy/openstack/common/systemd.py
daisy/openstack/common/threadgroup.py
daisy/quota/__init__.py
daisy/registry/__init__.py
daisy/registry/api/__init__.py
daisy/registry/api/v1/__init__.py
daisy/registry/api/v1/images.py
daisy/registry/api/v1/members.py
daisy/registry/api/v2/__init__.py
daisy/registry/api/v2/rpc.py
daisy/registry/client/__init__.py
daisy/registry/client/v1/__init__.py
daisy/registry/client/v1/api.py
daisy/registry/client/v1/client.py
daisy/registry/client/v2/__init__.py
daisy/registry/client/v2/api.py
daisy/registry/client/v2/client.py
daisy/search/__init__.py
daisy/search/api/__init__.py
daisy/search/api/v0_1/__init__.py
daisy/search/api/v0_1/router.py
daisy/search/api/v0_1/search.py
daisy/search/plugins/__init__.py
daisy/search/plugins/base.py
daisy/search/plugins/images.py
daisy/search/plugins/images_notification_handler.py
daisy/search/plugins/metadefs.py
daisy/search/plugins/metadefs_notification_handler.py
daisy/orchestration/__init__.py
daisy/orchestration/manager.py
daisy/tests/__init__.py
daisy/tests/stubs.py
daisy/tests/test_hacking.py
daisy/tests/utils.py
daisy/tests/etc/daisy-swift.conf
daisy/tests/etc/policy.json
daisy/tests/etc/property-protections-policies.conf
daisy/tests/etc/property-protections.conf
daisy/tests/etc/schema-image.json
daisy/tests/functional/__init__.py
daisy/tests/functional/store_utils.py
daisy/tests/functional/test_api.py
daisy/tests/functional/test_bin_daisy_cache_manage.py
daisy/tests/functional/test_cache_middleware.py
daisy/tests/functional/test_client_exceptions.py
daisy/tests/functional/test_client_redirects.py
daisy/tests/functional/test_daisy_manage.py
daisy/tests/functional/test_gzip_middleware.py
daisy/tests/functional/test_logging.py
daisy/tests/functional/test_reload.py
daisy/tests/functional/test_scrubber.py
daisy/tests/functional/test_sqlite.py
daisy/tests/functional/test_ssl.py
daisy/tests/functional/db/__init__.py
daisy/tests/functional/db/base.py
daisy/tests/functional/db/base_artifacts.py
daisy/tests/functional/db/base_metadef.py
daisy/tests/functional/db/test_registry.py
daisy/tests/functional/db/test_rpc_endpoint.py
daisy/tests/functional/db/test_simple.py
daisy/tests/functional/db/test_sqlalchemy.py
daisy/tests/functional/v1/__init__.py
daisy/tests/functional/v1/test_api.py
daisy/tests/functional/v1/test_copy_to_file.py
daisy/tests/functional/v1/test_misc.py
daisy/tests/functional/v1/test_multiprocessing.py
daisy/tests/functional/v2/__init__.py
daisy/tests/functional/v2/registry_data_api.py
daisy/tests/functional/v2/test_images.py
daisy/tests/functional/v2/test_metadef_namespaces.py
daisy/tests/functional/v2/test_metadef_objects.py
daisy/tests/functional/v2/test_metadef_properties.py
daisy/tests/functional/v2/test_metadef_resourcetypes.py
daisy/tests/functional/v2/test_metadef_tags.py
daisy/tests/functional/v2/test_schemas.py
daisy/tests/functional/v2/test_tasks.py
daisy/tests/integration/__init__.py
daisy/tests/integration/legacy_functional/__init__.py
daisy/tests/integration/legacy_functional/base.py
daisy/tests/integration/legacy_functional/test_v1_api.py
daisy/tests/integration/v2/__init__.py
daisy/tests/integration/v2/base.py
daisy/tests/integration/v2/test_property_quota_violations.py
daisy/tests/integration/v2/test_tasks_api.py
daisy/tests/unit/__init__.py
daisy/tests/unit/base.py
daisy/tests/unit/fake_rados.py
daisy/tests/unit/test_artifact_type_definition_framework.py
daisy/tests/unit/test_artifacts_plugin_loader.py
daisy/tests/unit/test_auth.py
daisy/tests/unit/test_cache_middleware.py
daisy/tests/unit/test_cached_images.py
daisy/tests/unit/test_context.py
daisy/tests/unit/test_context_middleware.py
daisy/tests/unit/test_db.py
daisy/tests/unit/test_db_metadef.py
daisy/tests/unit/test_domain.py
daisy/tests/unit/test_domain_proxy.py
daisy/tests/unit/test_gateway.py
daisy/tests/unit/test_daisy_replicator.py
daisy/tests/unit/test_image_cache.py
daisy/tests/unit/test_image_cache_client.py
daisy/tests/unit/test_jsonpatchmixin.py
daisy/tests/unit/test_manage.py
daisy/tests/unit/test_migrations.py
daisy/tests/unit/test_misc.py
daisy/tests/unit/test_notifier.py
daisy/tests/unit/test_opts.py
daisy/tests/unit/test_policy.py
daisy/tests/unit/test_quota.py
daisy/tests/unit/test_schema.py
daisy/tests/unit/test_scrubber.py
daisy/tests/unit/test_search.py
daisy/tests/unit/test_store_image.py
daisy/tests/unit/test_store_location.py
daisy/tests/unit/test_versions.py
daisy/tests/unit/utils.py
daisy/tests/unit/api/__init__.py
daisy/tests/unit/api/test_cmd.py
daisy/tests/unit/api/test_cmd_cache_manage.py
daisy/tests/unit/api/test_common.py
daisy/tests/unit/api/test_property_protections.py
daisy/tests/unit/api/middleware/__init__.py
daisy/tests/unit/api/middleware/test_cache_manage.py
daisy/tests/unit/async/__init__.py
daisy/tests/unit/async/test_async.py
daisy/tests/unit/async/test_taskflow_executor.py
daisy/tests/unit/async/flows/__init__.py
daisy/tests/unit/async/flows/test_convert.py
daisy/tests/unit/async/flows/test_import.py
daisy/tests/unit/async/flows/test_introspect.py
daisy/tests/unit/common/__init__.py
daisy/tests/unit/common/test_client.py
daisy/tests/unit/common/test_config.py
daisy/tests/unit/common/test_exception.py
daisy/tests/unit/common/test_location_strategy.py
daisy/tests/unit/common/test_property_utils.py
daisy/tests/unit/common/test_rpc.py
daisy/tests/unit/common/test_scripts.py
daisy/tests/unit/common/test_semver.py
daisy/tests/unit/common/test_swift_store_utils.py
daisy/tests/unit/common/test_utils.py
daisy/tests/unit/common/test_wsgi.py
daisy/tests/unit/common/test_wsgi_ipv6.py
daisy/tests/unit/common/scripts/__init__.py
daisy/tests/unit/common/scripts/test_scripts_utils.py
daisy/tests/unit/common/scripts/image_import/__init__.py
daisy/tests/unit/common/scripts/image_import/test_main.py
daisy/tests/unit/v0_1/test_search.py
daisy/tests/unit/v1/__init__.py
daisy/tests/unit/v1/test_api.py
daisy/tests/unit/v1/test_registry_api.py
daisy/tests/unit/v1/test_registry_client.py
daisy/tests/unit/v1/test_upload_utils.py
daisy/tests/unit/v2/__init__.py
daisy/tests/unit/v2/test_image_actions_resource.py
daisy/tests/unit/v2/test_image_data_resource.py
daisy/tests/unit/v2/test_image_members_resource.py
daisy/tests/unit/v2/test_image_tags_resource.py
daisy/tests/unit/v2/test_images_resource.py
daisy/tests/unit/v2/test_metadef_resources.py
daisy/tests/unit/v2/test_registry_api.py
daisy/tests/unit/v2/test_registry_client.py
daisy/tests/unit/v2/test_schemas_resource.py
daisy/tests/unit/v2/test_tasks_resource.py
daisy/tests/var/ca.crt
daisy/tests/var/ca.key
daisy/tests/var/certificate.crt
daisy/tests/var/privatekey.key
rally-jobs/README.rst
rally-jobs/daisy.yaml
rally-jobs/extra/README.rst
rally-jobs/extra/fake.img
rally-jobs/plugins/README.rst
rally-jobs/plugins/plugin_sample.py
tools/colorizer.py
tools/install_venv.py
tools/install_venv_common.py
tools/migrate_image_owners.py
tools/with_venv.sh

1
code/daisy/daisy.egg-info/dependency_links.txt Executable file

@ -0,0 +1 @@

43
code/daisy/daisy.egg-info/entry_points.txt Executable file

@ -0,0 +1,43 @@
[console_scripts]
daisy-api = daisy.cmd.api:main
daisy-cache-cleaner = daisy.cmd.cache_cleaner:main
daisy-cache-manage = daisy.cmd.cache_manage:main
daisy-cache-prefetcher = daisy.cmd.cache_prefetcher:main
daisy-cache-pruner = daisy.cmd.cache_pruner:main
daisy-control = daisy.cmd.control:main
daisy-index = daisy.cmd.index:main
daisy-manage = daisy.cmd.manage:main
daisy-registry = daisy.cmd.registry:main
daisy-replicator = daisy.cmd.replicator:main
daisy-scrubber = daisy.cmd.scrubber:main
daisy-search = daisy.cmd.search:main
daisy-orchestration = daisy.cmd.orchestration:main
[daisy.common.image_location_strategy.modules]
location_order_strategy = daisy.common.location_strategy.location_order
store_type_strategy = daisy.common.location_strategy.store_type
[daisy.database.metadata_backend]
sqlalchemy = daisy.db.sqlalchemy.metadata
[daisy.database.migration_backend]
sqlalchemy = oslo.db.sqlalchemy.migration
[daisy.flows]
import = daisy.async.flows.base_import:get_flow
[daisy.flows.import]
convert = daisy.async.flows.convert:get_flow
introspect = daisy.async.flows.introspect:get_flow
[daisy.search.index_backend]
image = daisy.search.plugins.images:ImageIndex
metadef = daisy.search.plugins.metadefs:MetadefIndex
[oslo.config.opts]
daisy.api = daisy.opts:list_api_opts
daisy.cache = daisy.opts:list_cache_opts
daisy.manage = daisy.opts:list_manage_opts
daisy.registry = daisy.opts:list_registry_opts
daisy.scrubber = daisy.opts:list_scrubber_opts
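Each section above is a setuptools entry-point group; a minimal, hypothetical sketch of how such declarations resolve at runtime (assumes setuptools and an installed daisy package):

# Hypothetical sketch; requires the daisy package to be installed.
import pkg_resources

# Console scripts: 'daisy-api' maps to daisy.cmd.api:main.
for ep in pkg_resources.iter_entry_points('console_scripts', name='daisy-api'):
    main = ep.load()           # imports daisy.cmd.api and returns its main()
    print(ep.name, main)

# Plugin groups resolve the same way, e.g. the import flows declared above:
for ep in pkg_resources.iter_entry_points('daisy.flows.import'):
    print(ep.name, ep.load())  # 'convert', 'introspect'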

1
code/daisy/daisy.egg-info/not-zip-safe Executable file

@ -0,0 +1 @@

1
code/daisy/daisy.egg-info/pbr.json Executable file

@ -0,0 +1 @@
{"is_release": true, "git_version": "93b0d5f"}

40
code/daisy/daisy.egg-info/requires.txt Executable file

@ -0,0 +1,40 @@
pbr>=0.6,!=0.7,<1.0
greenlet>=0.3.2
SQLAlchemy>=0.9.7,<=0.9.99
anyjson>=0.3.3
eventlet>=0.16.1,!=0.17.0
PasteDeploy>=1.5.0
Routes>=1.12.3,!=2.0
WebOb>=1.2.3
sqlalchemy-migrate>=0.9.5
httplib2>=0.7.5
kombu>=2.5.0
pycrypto>=2.6
iso8601>=0.1.9
ordereddict
oslo.config>=1.9.3,<1.10.0 # Apache-2.0
oslo.concurrency>=1.8.0,<1.9.0 # Apache-2.0
oslo.context>=0.2.0,<0.3.0 # Apache-2.0
oslo.utils>=1.4.0,<1.5.0 # Apache-2.0
stevedore>=1.3.0,<1.4.0 # Apache-2.0
taskflow>=0.7.1,<0.8.0
keystonemiddleware>=1.5.0,<1.6.0
WSME>=0.6
posix_ipc
python-swiftclient>=2.2.0,<2.5.0
oslo.vmware>=0.11.1,<0.12.0 # Apache-2.0
Paste
jsonschema>=2.0.0,<3.0.0
python-keystoneclient>=1.1.0,<1.4.0
pyOpenSSL>=0.11
six>=1.9.0
oslo.db>=1.7.0,<1.8.0 # Apache-2.0
oslo.i18n>=1.5.0,<1.6.0 # Apache-2.0
oslo.log>=1.0.0,<1.1.0 # Apache-2.0
oslo.messaging>=1.8.0,<1.9.0 # Apache-2.0
oslo.policy>=0.3.1,<0.4.0 # Apache-2.0
oslo.serialization>=1.4.0,<1.5.0 # Apache-2.0
retrying>=1.2.3,!=1.3.0 # Apache-2.0
osprofiler>=0.3.0 # Apache-2.0
glance_store>=0.3.0,<0.5.0 # Apache-2.0
semantic_version>=2.3.1
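These pins use standard pip version-specifier syntax; a small, hypothetical sketch of how a specifier such as the pbr pin above evaluates (assumes the packaging library, which is not a daisy dependency):

# Hypothetical sketch using the 'packaging' library.
from packaging.specifiers import SpecifierSet

spec = SpecifierSet('>=0.6,!=0.7,<1.0')  # same syntax as the pbr pin above
print('0.8.0' in spec)   # True: within range and not excluded
print('0.7' in spec)     # False: explicitly excluded by !=0.7
print('1.0' in spec)     # False: the <1.0 upper bound is exclusive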

1
code/daisy/daisy.egg-info/top_level.txt Executable file

@ -0,0 +1 @@
daisy

0
code/daisy/daisy/__init__.py Executable file

20
code/daisy/daisy/api/__init__.py Executable file

@ -0,0 +1,20 @@
# Copyright 2011-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import paste.urlmap
def root_app_factory(loader, global_conf, **local_conf):
return paste.urlmap.urlmap_factory(loader, global_conf, **local_conf)
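root_app_factory simply delegates to Paste's urlmap composite; a minimal, hypothetical sketch of the URLMap object it returns (assumes Paste is installed; _v1_app is a stand-in, not daisy code):

# Hypothetical sketch of the prefix dispatch that urlmap_factory sets up.
from paste.urlmap import URLMap


def _v1_app(environ, start_response):
    # Stand-in WSGI app; in daisy this would be a versioned API pipeline.
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'v1']


mapper = URLMap()
mapper['/v1'] = _v1_app    # requests under /v1 are routed to _v1_app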

899
code/daisy/daisy/api/authorization.py Executable file

@ -0,0 +1,899 @@
# Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from daisy.common import exception
import daisy.domain.proxy
from daisy import i18n
_ = i18n._
def is_image_mutable(context, image):
"""Return True if the image is mutable in this context."""
if context.is_admin:
return True
if image.owner is None or context.owner is None:
return False
return image.owner == context.owner
def proxy_image(context, image):
if is_image_mutable(context, image):
return ImageProxy(image, context)
else:
return ImmutableImageProxy(image, context)
def is_member_mutable(context, member):
"""Return True if the image is mutable in this context."""
if context.is_admin:
return True
if context.owner is None:
return False
return member.member_id == context.owner
def proxy_member(context, member):
if is_member_mutable(context, member):
return member
else:
return ImmutableMemberProxy(member)
def is_task_mutable(context, task):
"""Return True if the task is mutable in this context."""
if context.is_admin:
return True
if context.owner is None:
return False
return task.owner == context.owner
def is_task_stub_mutable(context, task_stub):
"""Return True if the task stub is mutable in this context."""
if context.is_admin:
return True
if context.owner is None:
return False
return task_stub.owner == context.owner
def proxy_task(context, task):
if is_task_mutable(context, task):
return task
else:
return ImmutableTaskProxy(task)
def proxy_task_stub(context, task_stub):
if is_task_stub_mutable(context, task_stub):
return task_stub
else:
return ImmutableTaskStubProxy(task_stub)
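# The proxy_* helpers above implement a simple capability check: admins and
# owners get the mutable domain object back; any other caller gets an
# Immutable*Proxy whose mutators raise exception.Forbidden.
#
# Illustrative (hypothetical) usage:
#     image = proxy_image(context, image_repo.get(image_id))
#     image.delete()   # Forbidden unless context.is_admin or owner matches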
class ImageRepoProxy(daisy.domain.proxy.Repo):
def __init__(self, image_repo, context):
self.context = context
self.image_repo = image_repo
proxy_kwargs = {'context': self.context}
super(ImageRepoProxy, self).__init__(image_repo,
item_proxy_class=ImageProxy,
item_proxy_kwargs=proxy_kwargs)
def get(self, image_id):
image = self.image_repo.get(image_id)
return proxy_image(self.context, image)
def list(self, *args, **kwargs):
images = self.image_repo.list(*args, **kwargs)
return [proxy_image(self.context, i) for i in images]
class ImageMemberRepoProxy(daisy.domain.proxy.Repo):
def __init__(self, member_repo, image, context):
self.member_repo = member_repo
self.image = image
self.context = context
super(ImageMemberRepoProxy, self).__init__(member_repo)
def get(self, member_id):
if (self.context.is_admin or
self.context.owner in (self.image.owner, member_id)):
member = self.member_repo.get(member_id)
return proxy_member(self.context, member)
else:
message = _("You cannot get image member for %s")
raise exception.Forbidden(message % member_id)
def list(self, *args, **kwargs):
members = self.member_repo.list(*args, **kwargs)
if (self.context.is_admin or
self.context.owner == self.image.owner):
return [proxy_member(self.context, m) for m in members]
for member in members:
if member.member_id == self.context.owner:
return [proxy_member(self.context, member)]
message = _("You cannot get image member for %s")
raise exception.Forbidden(message % self.image.image_id)
def remove(self, image_member):
if (self.image.owner == self.context.owner or
self.context.is_admin):
self.member_repo.remove(image_member)
else:
message = _("You cannot delete image member for %s")
raise exception.Forbidden(message
% self.image.image_id)
def add(self, image_member):
if (self.image.owner == self.context.owner or
self.context.is_admin):
self.member_repo.add(image_member)
else:
message = _("You cannot add image member for %s")
raise exception.Forbidden(message
% self.image.image_id)
def save(self, image_member, from_state=None):
if (self.context.is_admin or
self.context.owner == image_member.member_id):
self.member_repo.save(image_member, from_state=from_state)
else:
message = _("You cannot update image member %s")
raise exception.Forbidden(message % image_member.member_id)
class ImageFactoryProxy(daisy.domain.proxy.ImageFactory):
def __init__(self, image_factory, context):
self.image_factory = image_factory
self.context = context
kwargs = {'context': self.context}
super(ImageFactoryProxy, self).__init__(image_factory,
proxy_class=ImageProxy,
proxy_kwargs=kwargs)
def new_image(self, **kwargs):
owner = kwargs.pop('owner', self.context.owner)
if not self.context.is_admin:
if owner is None or owner != self.context.owner:
message = _("You are not permitted to create images "
"owned by '%s'.")
raise exception.Forbidden(message % owner)
return super(ImageFactoryProxy, self).new_image(owner=owner, **kwargs)
class ImageMemberFactoryProxy(object):
def __init__(self, image_member_factory, context):
self.image_member_factory = image_member_factory
self.context = context
def new_image_member(self, image, member_id):
owner = image.owner
if not self.context.is_admin:
if owner is None or owner != self.context.owner:
message = _("You are not permitted to create image members "
"for the image.")
raise exception.Forbidden(message)
if image.visibility == 'public':
message = _("Public images do not have members.")
raise exception.Forbidden(message)
return self.image_member_factory.new_image_member(image, member_id)
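# Membership rules enforced above: only the image owner (or an admin) may add
# members, and public images cannot have members at all.
#
# Illustrative (hypothetical) call:
#     factory = ImageMemberFactoryProxy(member_factory, context)
#     member = factory.new_image_member(image, 'some-tenant-id')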
def _immutable_attr(target, attr, proxy=None):
def get_attr(self):
value = getattr(getattr(self, target), attr)
if proxy is not None:
value = proxy(value)
return value
def forbidden(self, *args, **kwargs):
resource = getattr(self, 'resource_name', 'resource')
message = _("You are not permitted to modify '%(attr)s' on this "
"%(resource)s.")
raise exception.Forbidden(message % {'attr': attr,
'resource': resource})
return property(get_attr, forbidden, forbidden)
class ImmutableLocations(list):
def forbidden(self, *args, **kwargs):
message = _("You are not permitted to modify locations "
"for this image.")
raise exception.Forbidden(message)
def __deepcopy__(self, memo):
return ImmutableLocations(copy.deepcopy(list(self), memo))
append = forbidden
extend = forbidden
insert = forbidden
pop = forbidden
remove = forbidden
reverse = forbidden
sort = forbidden
__delitem__ = forbidden
__delslice__ = forbidden
__iadd__ = forbidden
__imul__ = forbidden
__setitem__ = forbidden
__setslice__ = forbidden
class ImmutableProperties(dict):
def forbidden_key(self, key, *args, **kwargs):
message = _("You are not permitted to modify '%s' on this image.")
raise exception.Forbidden(message % key)
def forbidden(self, *args, **kwargs):
message = _("You are not permitted to modify this image.")
raise exception.Forbidden(message)
__delitem__ = forbidden_key
__setitem__ = forbidden_key
pop = forbidden
popitem = forbidden
setdefault = forbidden
update = forbidden
class ImmutableTags(set):
def forbidden(self, *args, **kwargs):
message = _("You are not permitted to modify tags on this image.")
raise exception.Forbidden(message)
add = forbidden
clear = forbidden
difference_update = forbidden
intersection_update = forbidden
pop = forbidden
remove = forbidden
symmetric_difference_update = forbidden
update = forbidden
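# ImmutableLocations, ImmutableProperties and ImmutableTags override every
# mutating method of list/dict/set with `forbidden`, so reads succeed while
# writes raise exception.Forbidden.
#
# Illustrative (hypothetical) example:
#     tags = ImmutableTags(['a', 'b'])
#     'a' in tags      # True: membership tests still work
#     tags.add('c')    # raises exception.Forbidden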
class ImmutableImageProxy(object):
def __init__(self, base, context):
self.base = base
self.context = context
self.resource_name = 'image'
name = _immutable_attr('base', 'name')
image_id = _immutable_attr('base', 'image_id')
status = _immutable_attr('base', 'status')
created_at = _immutable_attr('base', 'created_at')
updated_at = _immutable_attr('base', 'updated_at')
visibility = _immutable_attr('base', 'visibility')
min_disk = _immutable_attr('base', 'min_disk')
min_ram = _immutable_attr('base', 'min_ram')
protected = _immutable_attr('base', 'protected')
locations = _immutable_attr('base', 'locations', proxy=ImmutableLocations)
checksum = _immutable_attr('base', 'checksum')
owner = _immutable_attr('base', 'owner')
disk_format = _immutable_attr('base', 'disk_format')
container_format = _immutable_attr('base', 'container_format')
size = _immutable_attr('base', 'size')
virtual_size = _immutable_attr('base', 'virtual_size')
extra_properties = _immutable_attr('base', 'extra_properties',
proxy=ImmutableProperties)
tags = _immutable_attr('base', 'tags', proxy=ImmutableTags)
def delete(self):
message = _("You are not permitted to delete this image.")
raise exception.Forbidden(message)
def get_member_repo(self):
member_repo = self.base.get_member_repo()
return ImageMemberRepoProxy(member_repo, self, self.context)
def get_data(self, *args, **kwargs):
return self.base.get_data(*args, **kwargs)
def set_data(self, *args, **kwargs):
message = _("You are not permitted to upload data for this image.")
raise exception.Forbidden(message)
class ImmutableMemberProxy(object):
def __init__(self, base):
self.base = base
self.resource_name = 'image member'
id = _immutable_attr('base', 'id')
image_id = _immutable_attr('base', 'image_id')
member_id = _immutable_attr('base', 'member_id')
status = _immutable_attr('base', 'status')
created_at = _immutable_attr('base', 'created_at')
updated_at = _immutable_attr('base', 'updated_at')
class ImmutableTaskProxy(object):
def __init__(self, base):
self.base = base
self.resource_name = 'task'
task_id = _immutable_attr('base', 'task_id')
type = _immutable_attr('base', 'type')
status = _immutable_attr('base', 'status')
owner = _immutable_attr('base', 'owner')
expires_at = _immutable_attr('base', 'expires_at')
created_at = _immutable_attr('base', 'created_at')
updated_at = _immutable_attr('base', 'updated_at')
input = _immutable_attr('base', 'input')
message = _immutable_attr('base', 'message')
result = _immutable_attr('base', 'result')
def run(self, executor):
self.base.run(executor)
def begin_processing(self):
message = _("You are not permitted to set status on this task.")
raise exception.Forbidden(message)
def succeed(self, result):
message = _("You are not permitted to set status on this task.")
raise exception.Forbidden(message)
def fail(self, message):
message = _("You are not permitted to set status on this task.")
raise exception.Forbidden(message)
class ImmutableTaskStubProxy(object):
def __init__(self, base):
self.base = base
self.resource_name = 'task stub'
task_id = _immutable_attr('base', 'task_id')
type = _immutable_attr('base', 'type')
status = _immutable_attr('base', 'status')
owner = _immutable_attr('base', 'owner')
expires_at = _immutable_attr('base', 'expires_at')
created_at = _immutable_attr('base', 'created_at')
updated_at = _immutable_attr('base', 'updated_at')
class ImageProxy(daisy.domain.proxy.Image):
def __init__(self, image, context):
self.image = image
self.context = context
super(ImageProxy, self).__init__(image)
def get_member_repo(self, **kwargs):
if self.image.visibility == 'public':
message = _("Public images do not have members.")
raise exception.Forbidden(message)
else:
member_repo = self.image.get_member_repo(**kwargs)
return ImageMemberRepoProxy(member_repo, self, self.context)
class TaskProxy(daisy.domain.proxy.Task):
def __init__(self, task):
self.task = task
super(TaskProxy, self).__init__(task)
class TaskFactoryProxy(daisy.domain.proxy.TaskFactory):
def __init__(self, task_factory, context):
self.task_factory = task_factory
self.context = context
super(TaskFactoryProxy, self).__init__(
task_factory,
task_proxy_class=TaskProxy)
def new_task(self, **kwargs):
owner = kwargs.get('owner', self.context.owner)
# NOTE(nikhil): Unlike Images, Tasks are expected to have an owner.
# We currently do not allow even admins to set the owner to None.
if owner is not None and (owner == self.context.owner
or self.context.is_admin):
return super(TaskFactoryProxy, self).new_task(**kwargs)
else:
message = _("You are not permitted to create this task with "
"owner as: %s")
raise exception.Forbidden(message % owner)
class TaskRepoProxy(daisy.domain.proxy.TaskRepo):
def __init__(self, task_repo, context):
self.task_repo = task_repo
self.context = context
super(TaskRepoProxy, self).__init__(task_repo)
def get(self, task_id):
task = self.task_repo.get(task_id)
return proxy_task(self.context, task)
class TaskStubRepoProxy(daisy.domain.proxy.TaskStubRepo):
def __init__(self, task_stub_repo, context):
self.task_stub_repo = task_stub_repo
self.context = context
super(TaskStubRepoProxy, self).__init__(task_stub_repo)
def list(self, *args, **kwargs):
task_stubs = self.task_stub_repo.list(*args, **kwargs)
return [proxy_task_stub(self.context, t) for t in task_stubs]
# Metadef Namespace classes
def is_namespace_mutable(context, namespace):
"""Return True if the namespace is mutable in this context."""
if context.is_admin:
return True
if context.owner is None:
return False
return namespace.owner == context.owner
def proxy_namespace(context, namespace):
if is_namespace_mutable(context, namespace):
return namespace
else:
return ImmutableMetadefNamespaceProxy(namespace)
class ImmutableMetadefNamespaceProxy(object):
def __init__(self, base):
self.base = base
self.resource_name = 'namespace'
namespace_id = _immutable_attr('base', 'namespace_id')
namespace = _immutable_attr('base', 'namespace')
display_name = _immutable_attr('base', 'display_name')
description = _immutable_attr('base', 'description')
owner = _immutable_attr('base', 'owner')
visibility = _immutable_attr('base', 'visibility')
protected = _immutable_attr('base', 'protected')
created_at = _immutable_attr('base', 'created_at')
updated_at = _immutable_attr('base', 'updated_at')
def delete(self):
message = _("You are not permitted to delete this namespace.")
raise exception.Forbidden(message)
def save(self):
message = _("You are not permitted to update this namespace.")
raise exception.Forbidden(message)
class MetadefNamespaceProxy(daisy.domain.proxy.MetadefNamespace):
def __init__(self, namespace):
self.namespace_input = namespace
super(MetadefNamespaceProxy, self).__init__(namespace)
class MetadefNamespaceFactoryProxy(
daisy.domain.proxy.MetadefNamespaceFactory):
def __init__(self, meta_namespace_factory, context):
self.meta_namespace_factory = meta_namespace_factory
self.context = context
super(MetadefNamespaceFactoryProxy, self).__init__(
meta_namespace_factory,
meta_namespace_proxy_class=MetadefNamespaceProxy)
def new_namespace(self, **kwargs):
owner = kwargs.pop('owner', self.context.owner)
if not self.context.is_admin:
if owner is None or owner != self.context.owner:
                message = _("You are not permitted to create a namespace "
                            "owned by '%s'")
raise exception.Forbidden(message % (owner))
return super(MetadefNamespaceFactoryProxy, self).new_namespace(
owner=owner, **kwargs)
class MetadefNamespaceRepoProxy(daisy.domain.proxy.MetadefNamespaceRepo):
def __init__(self, namespace_repo, context):
self.namespace_repo = namespace_repo
self.context = context
super(MetadefNamespaceRepoProxy, self).__init__(namespace_repo)
def get(self, namespace):
namespace_obj = self.namespace_repo.get(namespace)
return proxy_namespace(self.context, namespace_obj)
def list(self, *args, **kwargs):
namespaces = self.namespace_repo.list(*args, **kwargs)
return [proxy_namespace(self.context, namespace) for
namespace in namespaces]
# Metadef Object classes
def is_object_mutable(context, object):
"""Return True if the object is mutable in this context."""
if context.is_admin:
return True
if context.owner is None:
return False
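    # a metadef object has no owner of its own; its mutability follows the
    # owner of the namespace it belongs to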
return object.namespace.owner == context.owner
def proxy_object(context, object):
if is_object_mutable(context, object):
return object
else:
return ImmutableMetadefObjectProxy(object)
class ImmutableMetadefObjectProxy(object):
def __init__(self, base):
self.base = base
self.resource_name = 'object'
object_id = _immutable_attr('base', 'object_id')
name = _immutable_attr('base', 'name')
required = _immutable_attr('base', 'required')
description = _immutable_attr('base', 'description')
properties = _immutable_attr('base', 'properties')
created_at = _immutable_attr('base', 'created_at')
updated_at = _immutable_attr('base', 'updated_at')
def delete(self):
message = _("You are not permitted to delete this object.")
raise exception.Forbidden(message)
def save(self):
message = _("You are not permitted to update this object.")
raise exception.Forbidden(message)
class MetadefObjectProxy(daisy.domain.proxy.MetadefObject):
def __init__(self, meta_object):
self.meta_object = meta_object
super(MetadefObjectProxy, self).__init__(meta_object)
class MetadefObjectFactoryProxy(daisy.domain.proxy.MetadefObjectFactory):
def __init__(self, meta_object_factory, context):
self.meta_object_factory = meta_object_factory
self.context = context
super(MetadefObjectFactoryProxy, self).__init__(
meta_object_factory,
meta_object_proxy_class=MetadefObjectProxy)
def new_object(self, **kwargs):
owner = kwargs.pop('owner', self.context.owner)
if not self.context.is_admin:
if owner is None or owner != self.context.owner:
                message = _("You are not permitted to create an object "
                            "owned by '%s'")
raise exception.Forbidden(message % (owner))
return super(MetadefObjectFactoryProxy, self).new_object(**kwargs)
class MetadefObjectRepoProxy(daisy.domain.proxy.MetadefObjectRepo):
def __init__(self, object_repo, context):
self.object_repo = object_repo
self.context = context
super(MetadefObjectRepoProxy, self).__init__(object_repo)
def get(self, namespace, object_name):
meta_object = self.object_repo.get(namespace, object_name)
return proxy_object(self.context, meta_object)
def list(self, *args, **kwargs):
objects = self.object_repo.list(*args, **kwargs)
return [proxy_object(self.context, meta_object) for
meta_object in objects]
# Metadef ResourceType classes
def is_meta_resource_type_mutable(context, meta_resource_type):
"""Return True if the meta_resource_type is mutable in this context."""
if context.is_admin:
return True
if context.owner is None:
return False
    # (lakshmiS): A resource type can exist without an association to a
    # namespace, and resource types cannot be created/updated/deleted
    # directly (they have to be associated/de-associated from a namespace).
if meta_resource_type.namespace:
return meta_resource_type.namespace.owner == context.owner
else:
return False
def proxy_meta_resource_type(context, meta_resource_type):
if is_meta_resource_type_mutable(context, meta_resource_type):
return meta_resource_type
else:
return ImmutableMetadefResourceTypeProxy(meta_resource_type)
class ImmutableMetadefResourceTypeProxy(object):
def __init__(self, base):
self.base = base
self.resource_name = 'meta_resource_type'
namespace = _immutable_attr('base', 'namespace')
name = _immutable_attr('base', 'name')
prefix = _immutable_attr('base', 'prefix')
properties_target = _immutable_attr('base', 'properties_target')
created_at = _immutable_attr('base', 'created_at')
updated_at = _immutable_attr('base', 'updated_at')
def delete(self):
message = _("You are not permitted to delete this meta_resource_type.")
raise exception.Forbidden(message)
class MetadefResourceTypeProxy(daisy.domain.proxy.MetadefResourceType):
def __init__(self, meta_resource_type):
self.meta_resource_type = meta_resource_type
super(MetadefResourceTypeProxy, self).__init__(meta_resource_type)
class MetadefResourceTypeFactoryProxy(
daisy.domain.proxy.MetadefResourceTypeFactory):
def __init__(self, resource_type_factory, context):
self.meta_resource_type_factory = resource_type_factory
self.context = context
super(MetadefResourceTypeFactoryProxy, self).__init__(
resource_type_factory,
resource_type_proxy_class=MetadefResourceTypeProxy)
def new_resource_type(self, **kwargs):
owner = kwargs.pop('owner', self.context.owner)
if not self.context.is_admin:
if owner is None or owner != self.context.owner:
                message = _("You are not permitted to create a resource_type "
                            "owned by '%s'")
raise exception.Forbidden(message % (owner))
return super(MetadefResourceTypeFactoryProxy, self).new_resource_type(
**kwargs)
class MetadefResourceTypeRepoProxy(
daisy.domain.proxy.MetadefResourceTypeRepo):
def __init__(self, meta_resource_type_repo, context):
self.meta_resource_type_repo = meta_resource_type_repo
self.context = context
super(MetadefResourceTypeRepoProxy, self).__init__(
meta_resource_type_repo)
def list(self, *args, **kwargs):
meta_resource_types = self.meta_resource_type_repo.list(
*args, **kwargs)
return [proxy_meta_resource_type(self.context, meta_resource_type) for
meta_resource_type in meta_resource_types]
def get(self, *args, **kwargs):
meta_resource_type = self.meta_resource_type_repo.get(*args, **kwargs)
return proxy_meta_resource_type(self.context, meta_resource_type)
# Metadef namespace properties classes
def is_namespace_property_mutable(context, namespace_property):
"""Return True if the object is mutable in this context."""
if context.is_admin:
return True
if context.owner is None:
return False
return namespace_property.namespace.owner == context.owner
def proxy_namespace_property(context, namespace_property):
if is_namespace_property_mutable(context, namespace_property):
return namespace_property
else:
return ImmutableMetadefPropertyProxy(namespace_property)
class ImmutableMetadefPropertyProxy(object):
def __init__(self, base):
self.base = base
self.resource_name = 'namespace_property'
property_id = _immutable_attr('base', 'property_id')
name = _immutable_attr('base', 'name')
schema = _immutable_attr('base', 'schema')
def delete(self):
message = _("You are not permitted to delete this property.")
raise exception.Forbidden(message)
def save(self):
message = _("You are not permitted to update this property.")
raise exception.Forbidden(message)
class MetadefPropertyProxy(daisy.domain.proxy.MetadefProperty):
def __init__(self, namespace_property):
self.meta_object = namespace_property
super(MetadefPropertyProxy, self).__init__(namespace_property)
class MetadefPropertyFactoryProxy(daisy.domain.proxy.MetadefPropertyFactory):
def __init__(self, namespace_property_factory, context):
self.meta_object_factory = namespace_property_factory
self.context = context
super(MetadefPropertyFactoryProxy, self).__init__(
namespace_property_factory,
property_proxy_class=MetadefPropertyProxy)
def new_namespace_property(self, **kwargs):
owner = kwargs.pop('owner', self.context.owner)
if not self.context.is_admin:
if owner is None or owner != self.context.owner:
                message = _("You are not permitted to create a property "
                            "owned by '%s'")
raise exception.Forbidden(message % (owner))
return super(MetadefPropertyFactoryProxy, self).new_namespace_property(
**kwargs)
class MetadefPropertyRepoProxy(daisy.domain.proxy.MetadefPropertyRepo):
def __init__(self, namespace_property_repo, context):
self.namespace_property_repo = namespace_property_repo
self.context = context
super(MetadefPropertyRepoProxy, self).__init__(namespace_property_repo)
def get(self, namespace, object_name):
namespace_property = self.namespace_property_repo.get(namespace,
object_name)
return proxy_namespace_property(self.context, namespace_property)
def list(self, *args, **kwargs):
namespace_properties = self.namespace_property_repo.list(
*args, **kwargs)
return [proxy_namespace_property(self.context, namespace_property) for
namespace_property in namespace_properties]
# Metadef Tag classes
def is_tag_mutable(context, tag):
"""Return True if the tag is mutable in this context."""
if context.is_admin:
return True
if context.owner is None:
return False
return tag.namespace.owner == context.owner
def proxy_tag(context, tag):
if is_tag_mutable(context, tag):
return tag
else:
return ImmutableMetadefTagProxy(tag)
class ImmutableMetadefTagProxy(object):
def __init__(self, base):
self.base = base
self.resource_name = 'tag'
tag_id = _immutable_attr('base', 'tag_id')
name = _immutable_attr('base', 'name')
created_at = _immutable_attr('base', 'created_at')
updated_at = _immutable_attr('base', 'updated_at')
def delete(self):
message = _("You are not permitted to delete this tag.")
raise exception.Forbidden(message)
def save(self):
message = _("You are not permitted to update this tag.")
raise exception.Forbidden(message)
class MetadefTagProxy(daisy.domain.proxy.MetadefTag):
pass
class MetadefTagFactoryProxy(daisy.domain.proxy.MetadefTagFactory):
def __init__(self, meta_tag_factory, context):
self.meta_tag_factory = meta_tag_factory
self.context = context
super(MetadefTagFactoryProxy, self).__init__(
meta_tag_factory,
meta_tag_proxy_class=MetadefTagProxy)
def new_tag(self, **kwargs):
owner = kwargs.pop('owner', self.context.owner)
if not self.context.is_admin:
if owner is None:
message = _("Owner must be specified to create a tag.")
raise exception.Forbidden(message)
elif owner != self.context.owner:
message = _("You are not permitted to create a tag"
" in the namespace owned by '%s'")
raise exception.Forbidden(message % (owner))
return super(MetadefTagFactoryProxy, self).new_tag(**kwargs)
class MetadefTagRepoProxy(daisy.domain.proxy.MetadefTagRepo):
def __init__(self, tag_repo, context):
self.tag_repo = tag_repo
self.context = context
super(MetadefTagRepoProxy, self).__init__(tag_repo)
def get(self, namespace, tag_name):
meta_tag = self.tag_repo.get(namespace, tag_name)
return proxy_tag(self.context, meta_tag)
def list(self, *args, **kwargs):
tags = self.tag_repo.list(*args, **kwargs)
return [proxy_tag(self.context, meta_tag) for
meta_tag in tags]


@ -0,0 +1,235 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/install endpoint for tecs API
"""
import copy
import subprocess
import time
import traceback
import webob.exc
from oslo_config import cfg
from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPForbidden
from threading import Thread
from daisy import i18n
from daisy import notifier
from daisy.api import policy
import daisy.api.v1
from daisy.common import exception
import daisy.registry.client.v1.api as registry
try:
import simplejson as json
except ImportError:
import json
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
daisy_path = '/var/lib/daisy/'
tecs_backend_name = "tecs"
zenic_backend_name = "zenic"
proton_backend_name = "proton"
os_install_start_time = 0.0
def subprocess_call(command,file=None):
if file:
return_code = subprocess.call(command,
shell=True,
stdout=file,
stderr=file)
else:
return_code = subprocess.call(command,
shell=True,
stdout=open('/dev/null', 'w'),
stderr=subprocess.STDOUT)
if return_code != 0:
msg = "execute '%s' failed by subprocess call." % command
raise exception.SubprocessCmdFailed(msg)
def get_host_detail(req, host_id):
try:
host_detail = registry.get_host_metadata(req.context, host_id)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return host_detail
def get_roles_detail(req):
try:
roles = registry.get_roles_detail(req.context)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return roles
def get_cluster_roles_detail(req, cluster_id):
try:
params = {'cluster_id':cluster_id}
roles = registry.get_roles_detail(req.context, **params)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return roles
def get_hosts_of_role(req, role_id):
try:
hosts = registry.get_role_host_metadata(req.context, role_id)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return hosts
def get_role_detail(req, role_id):
try:
role = registry.get_role_metadata(req.context, role_id)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return role
def update_role(req, role_id,role_meta):
try:
registry.update_role_metadata(req.context, role_id, role_meta)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
def update_role_host(req, role_id, role_host):
try:
registry.update_role_host_metadata(req.context, role_id, role_host)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
def delete_role_hosts(req, role_id):
try:
registry.delete_role_host_metadata(req.context, role_id)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
def get_cluster_networks_detail(req, cluster_id):
try:
networks = registry.get_networks_detail(req.context, cluster_id)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return networks
def get_assigned_network(req, host_interface_id, network_id):
try:
assigned_network = registry.get_assigned_network(req.context, host_interface_id, network_id)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return assigned_network
def _ping_hosts_test(ips):
ping_cmd = 'fping'
for ip in set(ips):
ping_cmd = ping_cmd + ' ' + ip
obj = subprocess.Popen(ping_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdoutput, erroutput) = obj.communicate()
_returncode = obj.returncode
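    # fping exits 0 when all targets answered and 1 when some were
    # unreachable; any other exit code indicates an invalid address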
if _returncode == 0 or _returncode == 1:
ping_result = stdoutput.split('\n')
unreachable_hosts = [result.split()[0] for result in ping_result if result and result.split()[2] != 'alive']
else:
msg = "ping failed beaceuse there is invlid ip in %s" % ips
raise exception.InvalidIP(msg)
return unreachable_hosts
def check_ping_hosts(ping_ips, max_ping_times):
if not ping_ips:
LOG.info(_("no ip got for ping test"))
return ping_ips
ping_count = 0
time_step = 5
LOG.info(_("begin ping test for %s" % ','.join(ping_ips)))
while True:
if ping_count == 0:
ips = _ping_hosts_test(ping_ips)
else:
ips = _ping_hosts_test(ips)
ping_count += 1
if ips:
LOG.debug(_("ping host %s for %s times" % (','.join(ips), ping_count)))
if ping_count >= max_ping_times:
LOG.info(_("ping host %s timeout for %ss" % (','.join(ips), ping_count*time_step)))
return ips
time.sleep(time_step)
else:
LOG.info(_("ping %s successfully" % ','.join(ping_ips)))
return ips
def _ping_reachable_to_unreachable_host_test(ip,max_ping_times):
ping_cmd = 'fping'
ping_cmd = ping_cmd + ' ' + ip
ping_count = 0
time_step = 5
while True:
obj = subprocess.Popen(ping_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdoutput, erroutput) = obj.communicate()
_returncode = obj.returncode
if _returncode != 0:
return True
ping_count += 1
if ping_count >= max_ping_times:
LOG.info(_("ping host %s timeout for %ss" % (ip, ping_count*time_step)))
return False
time.sleep(time_step)
return False
def _ping_unreachable_to_reachable_host_test(ip, max_ping_times):
ping_count = 0
time_step = 5
ping_cmd = 'fping'
ping_cmd = ping_cmd + ' ' + ip
while True:
obj = subprocess.Popen(ping_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdoutput, erroutput) = obj.communicate()
_returncode = obj.returncode
if _returncode == 0:
return True
ping_count += 1
if ping_count >= max_ping_times:
LOG.info(_("ping host %s timeout for %ss" % (ip, ping_count*time_step)))
return False
time.sleep(time_step)
return False
def check_reboot_ping(ip):
    stop_max_ping_times = 360  # an HA host reboot may take 20 min, so time out after 30 min
start_max_ping_times = 60
_ping_reachable_to_unreachable_host_test(ip, stop_max_ping_times)
_ping_unreachable_to_reachable_host_test(ip, start_max_ping_times)
time.sleep(5)
def cidr_to_netmask(cidr):
ip_netmask = cidr.split('/')
if len(ip_netmask) != 2 or not ip_netmask[1]:
raise exception.InvalidNetworkConfig("cidr is not valid")
cidr_end = ip_netmask[1]
mask = ~(2**(32 - int(cidr_end)) - 1)
inter_ip = lambda x: '.'.join([str(x/(256**i)%256) for i in range(3,-1,-1)])
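    # mask is the bitwise complement of the host part; the resulting negative
    # Python int still renders correctly through inter_ip() thanks to Python 2
    # floor division and modulo, e.g. a /23 cidr yields '255.255.254.0'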
netmask = inter_ip(mask)
return netmask


@ -0,0 +1,85 @@
# -*- coding: utf-8 -*-
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Driver base-classes:
(Beginning of) the contract that deployment backend drivers must follow, and shared
types that support that contract
"""
import sys
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from daisy import i18n
from daisy.common import exception
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
LOG = logging.getLogger(__name__)
class DeploymentDriver(object):
"""base class for deployment interface.
"""
def install(self, req, cluster_id):
raise NotImplementedError()
def upgrade(self, req, cluster_id):
raise NotImplementedError()
def uninstall(self, req, cluster_id):
raise NotImplementedError()
    def uninstall_progress(self, req, cluster_id):
        LOG.info(_("driver has no interface for 'uninstall_progress'"))
        return {}
    def upgrade_progress(self, req, cluster_id):
        LOG.info(_("driver has no interface for 'upgrade_progress'"))
        return {}
    def exprot_db(self, req, cluster_id):
        LOG.info(_("driver has no interface for 'exprot_db'"))
        return {}
    def update_disk_array(self, req, cluster_id):
        LOG.info(_("driver has no interface for 'update_disk_array'"))
        return {}
def check_isinstance(obj, cls):
"""Checks that obj is of type cls, and lets PyLint infer types."""
if isinstance(obj, cls):
return obj
raise Exception(_('Expected object of type: %s') % (str(cls)))
def load_deployment_dirver(backend_name):
"""Load a cluster backend installation driver.
"""
backend_driver = "%s.api.API" % backend_name
LOG.info(_("Loading deployment backend '%s'") % backend_driver)
    try:
        driver = importutils.import_object_ns('daisy.api.backends', backend_driver)
        return check_isinstance(driver, DeploymentDriver)
    except ImportError:
        LOG.exception(_("Error, unable to load the deployment backend '%s'" % backend_driver))
        return None
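
# Example usage (hypothetical caller):
#     backend = load_deployment_dirver('tecs')
#     if backend:
#         backend.install(req, cluster_id)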


@ -0,0 +1,742 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/install endpoint for tecs API
"""
import copy
import subprocess
import time
import traceback
import webob.exc
from oslo_config import cfg
from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPForbidden
from webob.exc import HTTPServerError
import threading
from threading import Thread
from daisy import i18n
from daisy import notifier
from daisy.api import policy
import daisy.api.v1
from daisy.common import exception
import daisy.registry.client.v1.api as registry
from daisy.api.backends.tecs import config
from daisy.api.backends import driver
from daisy.api.network_api import network as neutron
from ironicclient import client as ironic_client
import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.tecs.common as tecs_cmn
try:
import simplejson as json
except ImportError:
import json
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
CONF = cfg.CONF
install_opts = [
cfg.StrOpt('max_parallel_os_number', default=10,
help='Maximum number of hosts install os at the same time.'),
]
CONF.register_opts(install_opts)
upgrade_opts = [
cfg.StrOpt('max_parallel_os_upgrade_number', default=10,
help='Maximum number of hosts upgrade os at the same time.'),
]
CONF.register_opts(upgrade_opts)
host_os_status = {
    'INIT': 'init',
    'INSTALLING': 'installing',
    'ACTIVE': 'active',
    'INSTALL_FAILED': 'install-failed',
    'UPDATING': 'updating',
    'UPDATE_FAILED': 'update-failed'
}
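# map human-readable bonding mode names to the numeric mode ids understood by
# the Linux bonding driver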
LINUX_BOND_MODE = {'balance-rr': '0', 'active-backup': '1', 'balance-xor': '2',
                   'broadcast': '3', '802.3ad': '4', 'balance-tlb': '5',
                   'balance-alb': '6'}
daisy_tecs_path = tecs_cmn.daisy_tecs_path
def get_ironicclient(): # pragma: no cover
"""Get Ironic client instance."""
args = {'os_auth_token': 'fake',
'ironic_url':'http://127.0.0.1:6385/v1'}
return ironic_client.get_client(1, **args)
def pxe_server_build(req, install_meta):
cluster_id = install_meta['cluster_id']
try:
networks = registry.get_networks_detail(req.context, cluster_id)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
try:
        # ip_inter: dotted quad -> int; inter_ip: int -> dotted quad
        ip_inter = lambda x: sum([256**j * int(i) for j, i in enumerate(x.split('.')[::-1])])
        inter_ip = lambda x: '.'.join([str(x/(256**i) % 256) for i in range(3, -1, -1)])
        network_cidr = [network['cidr'] for network in networks
                        if network['name'] == 'DEPLOYMENT'][0]
        if not network_cidr:
            msg = "Error: The CIDR of the DEPLOYMENT network is blank!"
            raise exception.Forbidden(msg)
        cidr_end = network_cidr.split('/')[1]
        ip_addr = network_cidr.split('/')[0]
        ip_addr_int = ip_inter(ip_addr)
        mask = ~(2**(32 - int(cidr_end)) - 1)
        net_mask = inter_ip(mask)
        ip_addr_min = inter_ip(ip_addr_int & (mask & 0xffffffff))
        ip_addr_max = inter_ip(ip_addr_int | (~mask & 0xffffffff))
        pxe_server_ip = inter_ip(ip_inter(ip_addr_min) + 1)
        client_ip_begin = inter_ip(ip_inter(ip_addr_min) + 2)
        client_ip_end = ip_addr_max
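        # e.g. for a DEPLOYMENT cidr of 192.168.1.0/24 the PXE server binds
        # 192.168.1.1 and DHCP clients are offered 192.168.1.2 up to the last
        # address of the subnet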
        args = {'build_pxe': 'yes',
                'eth_name': install_meta['deployment_interface'],
                'ip_address': pxe_server_ip,
                'net_mask': net_mask,
                'client_ip_begin': client_ip_begin,
                'client_ip_end': client_ip_end}
ironic = get_ironicclient()
ironic.daisy.build_pxe(**args)
except exception.Invalid as e:
msg = "build pxe server failed"
raise exception.InvalidNetworkConfig(msg)
def _get_network_plat(req,host_config, cluster_networks, dhcp_mac):
host_config['dhcp_mac'] = dhcp_mac
if host_config['interfaces']:
count = 0
host_config_orig = copy.deepcopy(host_config)
for interface in host_config['interfaces']:
count += 1
if (interface.has_key('assigned_networks') and
interface['assigned_networks']):
assigned_networks = copy.deepcopy(interface['assigned_networks'])
host_config['interfaces'][count-1]['assigned_networks'] = []
alias = []
for assigned_network in assigned_networks:
network_name = assigned_network['name']
cluster_network = [network for network in cluster_networks if network['name'] in network_name][0]
alias.append(cluster_network['alias'])
# convert cidr to netmask
cidr_to_ip = ""
assigned_networks_ip=tecs_cmn.get_host_network_ip(req, host_config_orig, cluster_networks, network_name)
if cluster_network.get('cidr', None):
inter_ip = lambda x: '.'.join([str(x/(256**i)%256) for i in range(3,-1,-1)])
cidr_to_ip = inter_ip(2**32-2**(32-int(cluster_network['cidr'].split('/')[1])))
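                        # 2**32 - 2**(32-prefix) is the integer form of the
                        # netmask for the prefix length, e.g. /24 -> 255.255.255.0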
if cluster_network['alias'] == None or len(alias) == 1:
network_type = cluster_network['network_type']
network_plat = dict(network_type=network_type,
ml2_type=cluster_network['ml2_type'],
capability=cluster_network['capability'],
physnet_name=cluster_network['physnet_name'],
gateway=cluster_network.get('gateway', ""),
ip=assigned_networks_ip,
#ip=cluster_network.get('ip', ""),
netmask=cidr_to_ip,
vlan_id=cluster_network.get('vlan_id', ""))
if network_type == "MANAGEMENT" and cluster_network.get('gateway', "") == "":
msg = "Error: The gateway of network 'MANAGEMENT' is not given!"
raise exception.Forbidden(msg)
host_config['interfaces'][count-1]['assigned_networks'].append(network_plat)
interface['ip']=""
interface['netmask']=""
interface['gateway']=""
return host_config
def get_cluster_hosts_config(req, cluster_id):
params = dict(limit=1000000)
try:
cluster_data = registry.get_cluster_metadata(req.context, cluster_id)
networks = registry.get_networks_detail(req.context, cluster_id)
all_roles = registry.get_roles_detail(req.context)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
roles = [role for role in all_roles if role['cluster_id'] == cluster_id]
all_hosts_ids = cluster_data['nodes']
hosts_config = []
for host_id in all_hosts_ids:
host_detail = daisy_cmn.get_host_detail(req, host_id)
role_host_db_lv_size_lists = list()
if host_detail.has_key('role') and host_detail['role']:
host_roles = host_detail['role']
for role in roles:
if role['name'] in host_detail['role'] and role['glance_lv_size']:
host_detail['glance_lv_size'] = role['glance_lv_size']
if role.get('db_lv_size', None) and host_roles and role['name'] in host_roles:
role_host_db_lv_size_lists.append(role['db_lv_size'])
if role['name'] == 'COMPUTER' and role['name'] in host_detail['role'] and role['nova_lv_size']:
host_detail['nova_lv_size'] = role['nova_lv_size']
service_disks = tecs_cmn.get_service_disk_list(req, {'role_id':role['id']})
for service_disk in service_disks:
if service_disk['disk_location'] == 'local' and service_disk['service'] == 'mongodb':
host_detail['mongodb_lv_size'] = service_disk['size']
break
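            # a host may carry several roles; take the largest db_lv_size
            # among them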
if role_host_db_lv_size_lists:
host_detail['db_lv_size'] = max(role_host_db_lv_size_lists)
else:
host_detail['db_lv_size'] = 0
for interface in host_detail['interfaces']:
        if interface['type'] == 'bond' and interface['mode'] in LINUX_BOND_MODE.keys():
interface['mode'] = LINUX_BOND_MODE[interface['mode']]
if (host_detail['os_status'] == host_os_status['INIT'] or
host_detail['os_status'] == host_os_status['INSTALLING'] or
host_detail['os_status'] == host_os_status['INSTALL_FAILED']):
host_dhcp_interface = [hi for hi in host_detail['interfaces'] if hi['is_deployment']]
if not host_dhcp_interface:
msg = "cann't find dhcp interface on host %s" % host_detail['id']
raise exception.InvalidNetworkConfig(msg)
if len(host_dhcp_interface) > 1:
msg = "dhcp interface should only has one on host %s" % host_detail['id']
raise exception.InvalidNetworkConfig(msg)
host_config_detail = copy.deepcopy(host_detail)
host_config = _get_network_plat(req,host_config_detail,
networks,
host_dhcp_interface[0]['mac'])
hosts_config.append(tecs_cmn.sort_interfaces_by_pci(host_config))
return hosts_config
def check_tfg_exist():
get_tfg_patch = "ls %s|grep CGSL_VPLAT-.*\.iso$" % daisy_tecs_path
obj = subprocess.Popen(get_tfg_patch,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdoutput, erroutput) = obj.communicate()
tfg_patch_pkg_file = ""
tfg_patch_pkg_name = ""
if stdoutput:
tfg_patch_pkg_name = stdoutput.split('\n')[0]
tfg_patch_pkg_file = daisy_tecs_path + tfg_patch_pkg_name
chmod_for_tfg_bin = 'chmod +x %s' % tfg_patch_pkg_file
daisy_cmn.subprocess_call(chmod_for_tfg_bin)
if not stdoutput or not tfg_patch_pkg_name:
LOG.info(_("no CGSL_VPLAT iso file got in %s" % daisy_tecs_path))
return ""
return tfg_patch_pkg_file
def update_db_host_status(req, host_id, host_status):
"""
    Update host status and installation progress to db.
:return:
"""
try:
host_meta = {}
host_meta['os_progress'] = host_status['os_progress']
host_meta['os_status'] = host_status['os_status']
host_meta['messages'] = host_status['messages']
registry.update_host_metadata(req.context,
host_id,
host_meta)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
class OSInstall():
"""
Class for install OS.
"""
""" Definition for install states."""
def __init__(self, req, cluster_id):
self.req = req
self.cluster_id = cluster_id
        # poll interval: 5s (12 * time_step = one minute's worth of polls)
        self.time_step = 5
        # 30 min per host
        self.single_host_install_timeout = 30 * (12 * self.time_step)
        self.max_parallel_os_num = int(CONF.max_parallel_os_number)
        # cluster-level timeout grows with the size of the parallel batch
        self.cluster_hosts_install_timeout = \
            (self.max_parallel_os_num / 4 + 2) * 60 * (12 * self.time_step)
self.ironicclient = get_ironicclient()
def _set_boot_or_power_state(self, user, passwd, addr, action):
count = 0
repeat_times = 24
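        # retry with a linearly growing backoff (2s, 4s, ... 48s): 24 attempts
        # add up to roughly 10 minutes before giving up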
while count < repeat_times:
set_obj = self.ironicclient.daisy.set_boot_or_power_state(user,
passwd,
addr,
action)
set_dict = dict([(f, getattr(set_obj, f, '')) for f in ['return_code', 'info']])
rc = int(set_dict['return_code'])
            if rc == 0:
                LOG.info(_("set %s to '%s' successfully on attempt %s by ironic" % (addr, action, count+1)))
                break
            else:
                count += 1
                LOG.info(_("setting %s to '%s' failed %s times by ironic" % (addr, action, count)))
                time.sleep(count*2)
if count >= repeat_times:
message = "set %s to '%s' failed for 10 mins" % (addr,action)
raise exception.IMPIOprationFailed(message=message)
def _baremetal_install_os(self, host_detail):
# os_install_disk = 'sda'
os_version_file = host_detail['os_version_file']
if os_version_file:
test_os_version_exist = 'test -f %s' % os_version_file
daisy_cmn.subprocess_call(test_os_version_exist)
else:
self.message = "no OS version file configed for host %s" % host_detail['id']
raise exception.NotFound(message=self.message)
if host_detail.get('root_disk',None):
root_disk = host_detail['root_disk']
else:
root_disk = 'sda'
if host_detail.get('root_lv_size',None):
root_lv_size_m = host_detail['root_lv_size']
else:
root_lv_size_m = 51200
memory_size_b_str = str(host_detail['memory']['total'])
memory_size_b_int = int(memory_size_b_str.strip().split()[0])
memory_size_m = memory_size_b_int//1024
memory_size_g = memory_size_m//1024
swap_lv_size_m = host_detail['swap_lv_size']
cinder_vg_size_m = 0
disk_list = []
disk_storage_size_b = 0
for key in host_detail['disks']:
disk_list.append(host_detail['disks'][key]['name'])
            storage_size_str = host_detail['disks'][key]['size']
            storage_size_b_int = int(storage_size_str.strip().split()[0])
            disk_storage_size_b = disk_storage_size_b + storage_size_b_int
disk_list = ','.join(disk_list)
disk_storage_size_m = disk_storage_size_b//(1024*1024)
if host_detail.has_key('root_pwd') and host_detail['root_pwd']:
root_pwd = host_detail['root_pwd']
else:
root_pwd = 'ossdbg1'
if host_detail.has_key('isolcpus') and host_detail['isolcpus']:
isolcpus = host_detail['isolcpus']
else:
isolcpus = None
if host_detail.get('hugepages',None):
hugepages = host_detail['hugepages']
else:
hugepages = 0
if host_detail.get('hugepagesize',None):
hugepagesize = host_detail['hugepagesize']
else:
hugepagesize = '1G'
#tfg_patch_pkg_file = check_tfg_exist()
if (not host_detail['ipmi_user'] or
not host_detail['ipmi_passwd'] or
not host_detail['ipmi_addr'] ):
self.message = "Invalid ipmi information configed for host %s" % host_detail['id']
raise exception.NotFound(message=self.message)
self._set_boot_or_power_state(host_detail['ipmi_user'],
host_detail['ipmi_passwd'],
host_detail['ipmi_addr'],
'pxe')
kwargs = {'hostname':host_detail['name'],
'iso_path':os_version_file,
#'tfg_bin':tfg_patch_pkg_file,
'dhcp_mac':host_detail['dhcp_mac'],
'storage_size':disk_storage_size_m,
'memory_size':memory_size_g,
'interfaces':host_detail['interfaces'],
'root_lv_size':root_lv_size_m,
'swap_lv_size':swap_lv_size_m,
'cinder_vg_size':cinder_vg_size_m,
'disk_list':disk_list,
'root_disk':root_disk,
'root_pwd':root_pwd,
'isolcpus':isolcpus,
'hugepagesize':hugepagesize,
'hugepages':hugepages,
'reboot':'no'}
if host_detail.has_key('glance_lv_size'):
kwargs['glance_lv_size'] = host_detail['glance_lv_size']
else:
kwargs['glance_lv_size'] = 0
if host_detail.has_key('db_lv_size') and host_detail['db_lv_size']:
kwargs['db_lv_size'] = host_detail['db_lv_size']
else:
kwargs['db_lv_size'] = 0
if host_detail.has_key('mongodb_lv_size') and host_detail['mongodb_lv_size']:
kwargs['mongodb_lv_size'] = host_detail['mongodb_lv_size']
else:
kwargs['mongodb_lv_size'] = 0
if host_detail.has_key('nova_lv_size') and host_detail['nova_lv_size']:
kwargs['nova_lv_size'] = host_detail['nova_lv_size']
else:
kwargs['nova_lv_size'] = 0
install_os_obj = self.ironicclient.daisy.install_os(**kwargs)
install_os_dict = dict([(f, getattr(install_os_obj, f, '')) for f in ['return_code', 'info']])
rc = int(install_os_dict['return_code'])
if rc != 0:
install_os_description = install_os_dict['info']
LOG.info(_("install os config failed because of '%s'" % (install_os_description)))
host_status = {'os_status':host_os_status['INSTALL_FAILED'],
'os_progress':0,
'messages':install_os_description}
update_db_host_status(self.req, host_detail['id'],host_status)
msg = "ironic install os return failed for host %s" % host_detail['id']
raise exception.OSInstallFailed(message=msg)
self._set_boot_or_power_state(host_detail['ipmi_user'],
host_detail['ipmi_passwd'],
host_detail['ipmi_addr'],
'reset')
    def _install_os_by_resource_type(self, hosts_detail):
# all hosts status set to 'init' before install os
for host_detail in hosts_detail:
host_status = {'os_status':host_os_status['INIT'],
'os_progress':0,
'messages':''}
update_db_host_status(self.req, host_detail['id'],host_status)
for host_detail in hosts_detail:
self._baremetal_install_os(host_detail)
def _set_disk_start_mode(self, host_detail):
LOG.info(_("Set boot from disk for host %s" % (host_detail['id'])))
self._set_boot_or_power_state(host_detail['ipmi_user'],
host_detail['ipmi_passwd'],
host_detail['ipmi_addr'],
'disk')
LOG.info(_("reboot host %s" % (host_detail['id'])))
self._set_boot_or_power_state(host_detail['ipmi_user'],
host_detail['ipmi_passwd'],
host_detail['ipmi_addr'],
'reset')
def _init_progress(self, host_detail, hosts_status):
host_id = host_detail['id']
host_status = hosts_status[host_id] = {}
host_status['os_status'] = host_os_status['INSTALLING']
host_status['os_progress'] = 0
host_status['count'] = 0
if host_detail['resource_type'] == 'docker':
host_status['messages'] = "docker container is creating"
else:
host_status['messages'] = "OS installing"
update_db_host_status(self.req, host_id, host_status)
def _query_host_progress(self, host_detail, host_status, host_last_status):
host_id = host_detail['id']
install_result_obj = \
self.ironicclient.daisy.get_install_progress(host_detail['dhcp_mac'])
install_result = dict([(f, getattr(install_result_obj, f, ''))
for f in ['return_code', 'info', 'progress']])
rc = int(install_result['return_code'])
host_status['os_progress'] = int(install_result['progress'])
if rc == 0:
if host_status['os_progress'] == 100:
time_cost = str(round((time.time() - daisy_cmn.os_install_start_time)/60, 2))
LOG.info(_("It takes %s min for host %s to install os" % (time_cost, host_id)))
LOG.info(_("host %s install os completely." % host_id))
host_status['os_status'] = host_os_status['ACTIVE']
host_status['messages'] = "OS installed successfully"
# wait for nicfix script complete
time.sleep(10)
self._set_disk_start_mode(host_detail)
else:
if host_status['os_progress'] == host_last_status['os_progress']:
host_status['count'] = host_status['count'] + 1
LOG.debug(_("host %s has kept %ss when progress is %s." % (host_id,
host_status['count']*self.time_step, host_status['os_progress'])))
else:
LOG.info(_("host %s install failed." % host_id))
host_status['os_status'] = host_os_status['INSTALL_FAILED']
host_status['messages'] = install_result['info']
def _query_progress(self, hosts_last_status, hosts_detail):
hosts_status = copy.deepcopy(hosts_last_status)
for host_detail in hosts_detail:
host_id = host_detail['id']
if not hosts_status.has_key(host_id):
self._init_progress(host_detail, hosts_status)
continue
host_status = hosts_status[host_id]
host_last_status = hosts_last_status[host_id]
#only process installing hosts after init, other hosts info will be kept in hosts_status
if host_status['os_status'] != host_os_status['INSTALLING']:
continue
self._query_host_progress(host_detail, host_status, host_last_status)
if host_status['count']*self.time_step >= self.single_host_install_timeout:
host_status['os_status'] = host_os_status['INSTALL_FAILED']
                if host_detail['resource_type'] == 'docker':
                    host_status['messages'] = "docker container creation timed out"
                else:
                    host_status['messages'] = "os install timed out"
if (host_status['os_progress'] != host_last_status['os_progress'] or\
host_status['os_status'] != host_last_status['os_status']):
host_status['count'] = 0
update_db_host_status(self.req, host_id,host_status)
return hosts_status
def _get_install_status(self, hosts_detail):
query_count = 0
hosts_last_status = {}
while True:
hosts_install_status = self._query_progress(hosts_last_status, hosts_detail)
# if all hosts install over, break
installing_hosts = [id for id in hosts_install_status.keys()
if hosts_install_status[id]['os_status'] == host_os_status['INSTALLING']]
if not installing_hosts:
break
            # if the cluster-level timeout expires while some hosts are still
            # not 'active', mark them failed
elif query_count*self.time_step >= self.cluster_hosts_install_timeout:
for host_id,host_status in hosts_install_status.iteritems():
if (host_status['os_status'] != host_os_status['ACTIVE'] and
host_status['os_status'] != host_os_status['INSTALL_FAILED']):
                        # mark the host install failed because the cluster-level timeout expired
                        host_status['os_status'] = host_os_status['INSTALL_FAILED']
                        host_status['messages'] = "cluster os install timeout"
update_db_host_status(self.req, host_id, host_status)
break
else:
query_count += 1
hosts_last_status = hosts_install_status
time.sleep(self.time_step)
return hosts_install_status
def install_os(self, hosts_detail, role_hosts_ids):
if len(hosts_detail) > self.max_parallel_os_num:
install_hosts = hosts_detail[:self.max_parallel_os_num]
hosts_detail = hosts_detail[self.max_parallel_os_num:]
else:
install_hosts = hosts_detail
hosts_detail = []
install_hosts_id = [host_detail['id'] for host_detail in install_hosts]
LOG.info(_("Begin install os for hosts %s." % ','.join(install_hosts_id)))
daisy_cmn.os_install_start_time = time.time()
        self._install_os_by_resource_type(install_hosts)
LOG.info(_("Begin to query install progress..."))
# wait to install completely
cluster_install_status = self._get_install_status(install_hosts)
total_time_cost = str(round((time.time() - daisy_cmn.os_install_start_time)/60, 2))
LOG.info(_("It totally takes %s min for all host to install os" % total_time_cost))
LOG.info(_("OS install in cluster %s result is:" % self.cluster_id))
LOG.info(_("%s %s %s" % ('host-id', 'os-status', 'description')))
for host_id,host_status in cluster_install_status.iteritems():
LOG.info(_("%s %s %s" % (host_id, host_status['os_status'], host_status['messages'])))
if host_id in role_hosts_ids:
if host_status['os_status'] == host_os_status['INSTALL_FAILED']:
break
else:
role_hosts_ids.remove(host_id)
return (hosts_detail, role_hosts_ids)
def _os_thread_bin(req, host_ip, host_id):
host_meta = {}
password = "ossdbg1"
LOG.info(_("Begin update os for host %s." % (host_ip)))
cmd = 'mkdir -p /var/log/daisy/daisy_update/'
daisy_cmn.subprocess_call(cmd)
var_log_path = "/var/log/daisy/daisy_update/%s_update_tfg.log" % host_ip
with open(var_log_path, "w+") as fp:
cmd = '/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, password)
daisy_cmn.subprocess_call(cmd,fp)
cmd = 'clush -S -w %s "mkdir -p /home/daisy_update"' % (host_ip,)
daisy_cmn.subprocess_call(cmd,fp)
cmd = 'clush -S -b -w %s "rm -rf /home/daisy_update/*"' % (host_ip,)
daisy_cmn.subprocess_call(cmd,fp)
cmd = 'clush -S -w %s -c /var/lib/daisy/tecs/*CGSL_VPLAT*.iso /var/lib/daisy/tecs/tfg_upgrade.sh --dest=/home/daisy_update' % (host_ip,)
daisy_cmn.subprocess_call(cmd,fp)
cmd = 'clush -S -w %s "chmod 777 /home/daisy_update/*"' % (host_ip,)
daisy_cmn.subprocess_call(cmd,fp)
host_meta['os_progress'] = 30
host_meta['os_status'] = host_os_status['UPDATING']
host_meta['messages'] = ""
update_db_host_status(req, host_id, host_meta)
try:
exc_result = subprocess.check_output(
'clush -S -w %s "/home/daisy_update/tfg_upgrade.sh"' % (host_ip,),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
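        # clush exits 255 when the ssh connection drops; that is expected here
        # when the node reboots right after a successful tfg upgrade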
if e.returncode == 255 and "reboot" in e.output.strip():
                host_meta['os_progress'] = 100
                host_meta['os_status'] = host_os_status['ACTIVE']
                host_meta['messages'] = "upgrade tfg successfully, os reboot"
                LOG.info(_("Update tfg for %s successfully, os reboot!" % host_ip))
daisy_cmn.check_reboot_ping(host_ip)
else:
host_meta['os_progress'] = 0
host_meta['os_status'] = host_os_status['UPDATE_FAILED']
host_meta['messages'] = e.output.strip()[-400:-200].replace('\n',' ')
LOG.error(_("Update tfg for %s failed!" % host_ip))
update_db_host_status(req, host_id, host_meta)
fp.write(e.output.strip())
else:
host_meta['os_progress'] = 100
host_meta['os_status'] = host_os_status['ACTIVE']
host_meta['messages'] = "upgrade tfg successfully"
update_db_host_status(req, host_id, host_meta)
LOG.info(_("Update os for %s successfully!" % host_ip))
fp.write(exc_result)
if "reboot" in exc_result:
daisy_cmn.check_reboot_ping(host_ip)
# this will raise all the exceptions of the thread to the log file
def os_thread_bin(req, host_ip, host_id):
try:
_os_thread_bin(req, host_ip, host_id)
except Exception as e:
LOG.exception(e.message)
raise exception.ThreadBinException(message=e.message)
def _get_host_os_version(host_ip, host_pwd='ossdbg1'):
version = ""
tfg_version_file = '/usr/sbin/tfg_showversion'
try:
subprocess.check_output("sshpass -p %s ssh -o StrictHostKeyChecking=no"
" %s test -f %s" % (host_pwd, host_ip,
tfg_version_file),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
LOG.info(_("Host %s os version is TFG" % host_ip))
return version
try:
process = subprocess.Popen(["sshpass", "-p", "%s" % host_pwd, "ssh",
"-o StrictHostKeyChecking=no", "%s" % host_ip,
'tfg_showversion'], shell=False,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
version = process.stdout.read().strip('\n')
except subprocess.CalledProcessError:
msg = _("Get host %s os version by subprocess failed!" % host_ip)
raise exception.SubprocessCmdFailed(message=msg)
if version:
LOG.info(_("Host %s os version is %s" % (host_ip, version)))
return version
else:
msg = _("Get host %s os version by tfg_showversion failed!" % host_ip)
LOG.error(msg)
raise exception.Invalid(message=msg)
def _cmp_os_version(new_os_file, old_os_version, target_host_ip, password='ossdbg1'):
shell_file = '/usr/sbin/tfg_showversion'
if old_os_version:
try:
subprocess.check_output("test -f %s" % shell_file, shell=True,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
scripts = ["sshpass -p %s scp -r -o StrictHostKeyChecking=no %s:%s "
"/usr/sbin/" % (password, target_host_ip, shell_file)]
tecs_cmn.run_scrip(scripts)
cmp_script = "tfg_showversion %s %s" % (new_os_file, old_os_version)
try:
result = subprocess.check_output(cmp_script, shell=True,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
return -1
else:
if new_os_file.find("Mimosa") != -1:
return 0
else:
msg = _("Please use Mimosa os to upgrade instead of TFG")
LOG.error(msg)
raise exception.Forbidden(message=msg)
return result.find("yes")
def upgrade_os(req, hosts_list):
upgrade_hosts = []
max_parallel_os_upgrade_number = int(CONF.max_parallel_os_upgrade_number)
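    # upgrade hosts in batches of max_parallel_os_upgrade_number; each batch
    # runs one daemon thread per host and joins them before the next batch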
while hosts_list:
host_meta = {}
threads = []
if len(hosts_list) > max_parallel_os_upgrade_number:
upgrade_hosts = hosts_list[:max_parallel_os_upgrade_number]
hosts_list = hosts_list[max_parallel_os_upgrade_number:]
else:
upgrade_hosts = hosts_list
hosts_list = []
new_os_file = check_tfg_exist()
for host_info in upgrade_hosts:
host_id = host_info.keys()[0]
host_ip = host_info.values()[0]
host_detail = daisy_cmn.get_host_detail(req, host_id)
target_host_os = _get_host_os_version(host_ip, host_detail['root_pwd'])
if _cmp_os_version(new_os_file, target_host_os, host_ip) == 0:
host_meta['os_progress'] = 10
host_meta['os_status'] = host_os_status['UPDATING']
host_meta['messages'] = "os updating,begin copy iso"
update_db_host_status(req, host_id, host_meta)
t = threading.Thread(target=os_thread_bin, args=(req, host_ip,
host_id))
t.setDaemon(True)
t.start()
threads.append(t)
else:
LOG.warn(_("new os version is lower than or equal to that of "
"host %s, don't need to upgrade!" % host_ip))
try:
for t in threads:
t.join()
except:
LOG.warn(_("Join update thread %s failed!" % t))
else:
for host_info in upgrade_hosts:
update_failed_flag = False
host_id = host_info.keys()[0]
host_ip = host_info.values()[0]
host = registry.get_host_metadata(req.context, host_id)
if host['os_status'] == host_os_status['UPDATE_FAILED'] or host['os_status'] == host_os_status['INIT']:
update_failed_flag = True
raise exception.ThreadBinException("%s update tfg failed! %s" % (host_ip, host['messages']))
if not update_failed_flag:
host_meta = {}
host_meta['os_progress'] = 100
host_meta['os_status'] = host_os_status['ACTIVE']
host_meta['messages'] = "os upgrade successfully"
update_db_host_status(req, host_id,host_meta)


@ -0,0 +1,126 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/install endpoint for proton API
"""
from oslo_log import log as logging
import threading
from daisy import i18n
from daisy.common import exception
from daisy.api.backends import driver
import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.proton.common as proton_cmn
import daisy.api.backends.proton.install as instl
import daisy.api.backends.proton.uninstall as unstl
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
proton_state = proton_cmn.PROTON_STATE
class API(driver.DeploymentDriver):
"""
The hosts API is a RESTful web service for host data. The API
is as follows::
GET /hosts -- Returns a set of brief metadata about hosts
GET /hosts/detail -- Returns a set of detailed metadata about
hosts
HEAD /hosts/<ID> -- Return metadata about an host with id <ID>
GET /hosts/<ID> -- Return host data for host with id <ID>
POST /hosts -- Store host data and return metadata about the
newly-stored host
PUT /hosts/<ID> -- Update host metadata and/or upload host
data for a previously-reserved host
DELETE /hosts/<ID> -- Delete the host with id <ID>
"""
def __init__(self):
super(API, self).__init__()
return
def install(self, req, cluster_id):
"""
Install PROTON to a cluster.
cluster_id:cluster id
"""
proton_install_task = instl.ProtonInstallTask(req, cluster_id)
proton_install_task.start()
def _uninstall(self, req, role_id, threads):
try:
for t in threads:
t.setDaemon(True)
t.start()
LOG.info(_("uninstall threads have started,"
" please waiting...."))
for t in threads:
t.join()
except:
LOG.warn(_("Join uninstall thread failed!"))
else:
uninstall_failed_flag = False
role = daisy_cmn.get_role_detail(req, role_id)
if role['progress'] == 100:
unstl.update_progress_to_db(
req, role_id, proton_state['UNINSTALL_FAILED'])
uninstall_failed_flag = True
return
if role['status'] == proton_state['UNINSTALL_FAILED']:
uninstall_failed_flag = True
return
if not uninstall_failed_flag:
LOG.info(_("all uninstall threads have done,"
" set role of proton status to 'init'!"))
unstl.update_progress_to_db(req, role_id,
proton_state['INIT'])
def uninstall(self, req, cluster_id):
"""
Uninstall PROTON to a cluster.
:raises HTTPBadRequest if x-install-cluster is missing
"""
(role_id, hosts_list) = proton_cmn.get_roles_and_hosts_list(req,
cluster_id)
if role_id:
if not hosts_list:
msg = _("there is no host in cluster %s") % cluster_id
raise exception.ThreadBinException(msg)
unstl.update_progress_to_db(req, role_id,
proton_state['UNINSTALLING'], 0.0)
uninstall_progress_percentage = \
round(1 * 1.0 / len(hosts_list), 2) * 100
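            # each host contributes an equal share of the overall uninstall
            # progress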
threads = []
for host in hosts_list:
host_detail = proton_cmn.get_host_detail(req, host['host_id'])
t = threading.Thread(target=unstl.thread_bin,
args=(req,
host_detail['interfaces'][0]['ip'],
role_id,
uninstall_progress_percentage))
threads.append(t)
self._uninstall(req, role_id, threads)


@ -0,0 +1,178 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/install endpoint for proton API
"""
import subprocess
from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from daisy import i18n
from daisy.common import exception
import daisy.registry.client.v1.api as registry
import daisy.api.backends.common as daisy_cmn
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
daisy_proton_path = '/var/lib/daisy/proton/'
PROTON_STATE = {
'INIT': 'init',
'INSTALLING': 'installing',
'ACTIVE': 'active',
'INSTALL_FAILED': 'install-failed',
'UNINSTALLING': 'uninstalling',
'UNINSTALL_FAILED': 'uninstall-failed',
'UPDATING': 'updating',
'UPDATE_FAILED': 'update-failed',
}
def get_host_detail(req, host_id):
try:
host_detail = registry.get_host_metadata(req.context, host_id)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return host_detail
def get_roles_detail(req):
try:
roles = registry.get_roles_detail(req.context)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return roles
def get_hosts_of_role(req, role_id):
try:
hosts = registry.get_role_host_metadata(req.context, role_id)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return hosts
def get_roles_and_hosts_list(req, cluster_id):
roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id)
for role in roles:
if role['deployment_backend'] == daisy_cmn.proton_backend_name:
role_hosts = get_hosts_of_role(req, role['id'])
return (role['id'], role_hosts)
def get_role_detail(req, role_id):
try:
role = registry.get_role_metadata(req.context, role_id)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return role
def check_and_get_proton_version(daisy_proton_path):
proton_version_pkg_name = ""
get_proton_version_pkg = "ls %s| grep ^ZXDTC-PROTON.*\.bin$" \
% daisy_proton_path
obj = subprocess.Popen(
get_proton_version_pkg, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdoutput, erroutput) = obj.communicate()
if stdoutput:
proton_version_pkg_name = stdoutput.split('\n')[0]
proton_version_pkg_file = daisy_proton_path + proton_version_pkg_name
chmod_for_proton_version = 'chmod +x %s' % proton_version_pkg_file
daisy_cmn.subprocess_call(chmod_for_proton_version)
return proton_version_pkg_name
class ProtonShellExector():
"""
Install proton bin.
"""
def __init__(self, mgt_ip, proton_version_name, task_type, rmc_ip=''):
self.task_type = task_type
self.mgt_ip = mgt_ip
self.proton_version_file = daisy_proton_path + proton_version_name
self.rmc_ip = rmc_ip
self.clush_cmd = ""
self.oper_type = {
'install': self._install_proton,
'uninstall': self._uninstall_proton
}
self.oper_shell = {
'CMD_SSHPASS_PRE': "sshpass -p ossdbg1 %(ssh_ip)s %(cmd)s",
'CMD_BIN_SCP':
"scp %(path)s root@%(ssh_ip)s:/home" %
{'path': self.proton_version_file, 'ssh_ip': mgt_ip},
'CMD_BIN_INSTALL': "sudo /home/%s install %s 7777" %
(proton_version_name, self.rmc_ip),
'CMD_BIN_UNINSTALL': "sudo /home/%s uninstall" %
proton_version_name,
'CMD_BIN_REMOVE': "sudo rm -rf /home/%s" % proton_version_name
}
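        # CMD_SSHPASS_PRE wraps both local and remote steps: 'ssh_ip' is left
        # empty for the local scp step and set to 'ssh <mgt_ip>' for remote
        # execution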
self._execute()
def _install_proton(self):
self.clush_cmd = \
"%s;%s" % (
self.oper_shell['CMD_SSHPASS_PRE'] %
{"ssh_ip": "", "cmd": self.oper_shell['CMD_BIN_SCP']},
self.oper_shell['CMD_SSHPASS_PRE'] %
{
"ssh_ip": "ssh " + self.mgt_ip, "cmd":
self.oper_shell['CMD_BIN_INSTALL']
}
)
subprocess.check_output(self.clush_cmd, shell=True,
stderr=subprocess.STDOUT)
def _uninstall_proton(self):
self.clush_cmd = \
"%s;%s" % (
self.oper_shell['CMD_SSHPASS_PRE'] %
{"ssh_ip": "", "cmd": self.oper_shell['CMD_BIN_SCP']},
self.oper_shell['CMD_SSHPASS_PRE'] %
{
"ssh_ip": "ssh " + self.mgt_ip,
"cmd": self.oper_shell['CMD_BIN_UNINSTALL']
}
)
subprocess.check_output(self.clush_cmd, shell=True,
stderr=subprocess.STDOUT)
def _execute(self):
try:
if not self.task_type or not self.mgt_ip:
LOG.error(_("<<<ProtonShellExector::execute,"
" input params invalid!>>>"))
return
self.oper_type[self.task_type]()
except subprocess.CalledProcessError as e:
LOG.warn(_("<<<ProtonShellExector::execute:Execute command "
"failed! Reason:%s>>>" % e.output.strip()))
except Exception as e:
LOG.exception(_(e.message))
else:
LOG.info(_("<<<ProtonShellExector::execute:Execute command:%s,"
"successful!>>>" % self.clush_cmd))


@ -0,0 +1,153 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/install endpoint for proton API
"""
from oslo_log import log as logging
from threading import Thread
from daisy import i18n
import daisy.api.v1
from daisy.common import exception
import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.proton.common as proton_cmn
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS
SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS
ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE
proton_state = proton_cmn.PROTON_STATE
daisy_proton_path = proton_cmn.daisy_proton_path
def get_proton_ip(req, role_hosts):
proton_ip_list = []
for role_host in role_hosts:
host_detail = proton_cmn.get_host_detail(req,
role_host['host_id'])
for interface in host_detail['interfaces']:
for network in interface['assigned_networks']:
if network.get("name") == "MANAGEMENT":
proton_ip_list.append(network.get("ip"))
return proton_ip_list
def get_proton_hosts(req, cluster_id):
all_roles = proton_cmn.get_roles_detail(req)
for role in all_roles:
if role['cluster_id'] == cluster_id and role['name'] == 'PROTON':
role_hosts = proton_cmn.get_hosts_of_role(req, role['id'])
return get_proton_ip(req, role_hosts)
def get_rmc_host(req, cluster_id):
return "10.43.211.63"
class ProtonInstallTask(Thread):
"""
Class for install proton bin.
"""
def __init__(self, req, cluster_id):
super(ProtonInstallTask, self).__init__()
self.req = req
self.cluster_id = cluster_id
self.progress = 0
self.message = ""
self.state = proton_state['INIT']
self.proton_ip_list = []
self.install_log_fp = None
self.last_line_num = 0
self.need_install = False
self.ping_times = 36
def _update_install_progress_to_db(self):
"""
Update installation progress in the db.
:return:
"""
roles = daisy_cmn.get_cluster_roles_detail(self.req, self.cluster_id)
for role in roles:
if role['deployment_backend'] != daisy_cmn.proton_backend_name:
continue
role_hosts = daisy_cmn.get_hosts_of_role(self.req, role['id'])
for role_host in role_hosts:
if role_host['status'] != proton_state['ACTIVE']:
self.need_install = True
role_host['status'] = self.state
daisy_cmn.update_role_host(self.req, role_host['id'],
role_host)
role['status'] = self.state
role['messages'] = self.message
daisy_cmn.update_role(self.req, role['id'], role)
def run(self):
try:
self._run()
except (exception.InstallException,
exception.NotFound,
exception.InstallTimeoutException) as e:
LOG.exception(e.message)
else:
self.progress = 100
self.state = proton_state['ACTIVE']
self.message = "Proton install successfully"
LOG.info(_("Install PROTON for cluster %s successfully." %
self.cluster_id))
finally:
self._update_install_progress_to_db()
def _run(self):
"""
Execute the install file (.bin) in sync mode.
:return:
"""
if not self.cluster_id or not self.req:
raise exception.InstallException(
cluster_id=self.cluster_id, reason="invalid params.")
self.proton_ip_list = get_proton_hosts(self.req, self.cluster_id)
unreached_hosts = daisy_cmn.check_ping_hosts(self.proton_ip_list,
self.ping_times)
if unreached_hosts:
self.state = proton_state['INSTALL_FAILED']
self.message = "hosts %s ping failed" % unreached_hosts
raise exception.NotFound(message=self.message)
proton_version_name = \
proton_cmn.check_and_get_proton_version(daisy_proton_path)
if not proton_version_name:
self.state = proton_state['INSTALL_FAILED']
self.message = "PROTON version file not found in %s" % \
daisy_proton_path
raise exception.NotFound(message=self.message)
rmc_ip = get_rmc_host(self.req, self.cluster_id)
for proton_ip in self.proton_ip_list:
proton_cmn.ProtonShellExector(proton_ip, proton_version_name,
'install', rmc_ip)

View File

@ -0,0 +1,103 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/hosts endpoint for Daisy v1 API
"""
import subprocess
from oslo_log import log as logging
import threading
from daisy import i18n
import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.proton.common as proton_cmn
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
proton_state = proton_cmn.PROTON_STATE
daisy_proton_path = proton_cmn.daisy_proton_path
# Uninstall progress starts at 100; when uninstall succeeds it reaches 0,
# and the web UI displays the reverted value.
uninstall_proton_progress = 100.0
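# Worked example (a sketch, assuming the caller passes a step of
# 100 / number_of_hosts): with 4 PROTON hosts, each successful uninstall
# subtracts 25, so the stored progress walks 100 -> 75 -> 50 -> 25 -> 0
# while the web UI shows the inverse.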
uninstall_mutex = threading.Lock()
def update_progress_to_db(req, role_id, status, progress_percentage_step=0.0):
"""
Write uninstall progress and status to the db. The global lock object
'uninstall_mutex' makes this function thread safe.
:param req: http req.
:param role_id: Role whose rows need to be updated in the role table.
:param status: Uninstall status.
:return:
"""
global uninstall_mutex
global uninstall_proton_progress
uninstall_mutex.acquire(True)
uninstall_proton_progress -= progress_percentage_step
role = {}
role_hosts = daisy_cmn.get_hosts_of_role(req, role_id)
if status == proton_state['UNINSTALLING']:
role['status'] = status
role['progress'] = uninstall_proton_progress
role['messages'] = 'Proton uninstalling'
for role_host in role_hosts:
role_host_meta = dict()
role_host_meta['status'] = status
role_host_meta['progress'] = uninstall_proton_progress
daisy_cmn.update_role_host(req, role_host['id'], role_host_meta)
if status == proton_state['UNINSTALL_FAILED']:
role['status'] = status
role['messages'] = 'Uninstall-failed'
for role_host in role_hosts:
role_host_meta = dict()
role_host_meta['status'] = status
daisy_cmn.update_role_host(req, role_host['id'], role_host_meta)
elif status == proton_state['INIT']:
role['status'] = status
role['progress'] = 0
role['messages'] = 'Proton uninstalled successfully'
daisy_cmn.delete_role_hosts(req, role_id)
daisy_cmn.update_role(req, role_id, role)
uninstall_mutex.release()
def _thread_bin(req, host_ip, role_id, uninstall_progress_percentage):
try:
proton_version_name = \
proton_cmn.check_and_get_proton_version(daisy_proton_path)
proton_cmn.ProtonShellExector(host_ip, proton_version_name,
'uninstall')
except subprocess.CalledProcessError:
update_progress_to_db(req, role_id, proton_state['UNINSTALL_FAILED'])
LOG.info(_("Uninstall PROTON for %s failed!" % host_ip))
else:
update_progress_to_db(req, role_id, proton_state['UNINSTALLING'],
uninstall_progress_percentage)
LOG.info(_("Uninstall PROTON for %s successfully!" % host_ip))
def thread_bin(req, host_ip, role_id, uninstall_progress_percentage):
try:
_thread_bin(req, host_ip, role_id, uninstall_progress_percentage)
except Exception as e:
LOG.exception(e.message)

View File

@ -0,0 +1,382 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/install endpoint for tecs API
"""
import os
import copy
import subprocess
import time
import commands
import traceback
import webob.exc
from oslo_config import cfg
from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPForbidden
from webob.exc import HTTPServerError
import threading
from threading import Thread
from daisy import i18n
from daisy import notifier
from daisy.api import policy
import daisy.api.v1
from daisy.common import exception
import daisy.registry.client.v1.api as registry
from daisy.api.backends.tecs import config
from daisy.api.backends import driver
from daisy.api.network_api import network as neutron
from ironicclient import client as ironic_client
import daisy.api.backends.os as os_handle
import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.tecs.common as tecs_cmn
import daisy.api.backends.tecs.install as instl
import daisy.api.backends.tecs.uninstall as unstl
import daisy.api.backends.tecs.upgrade as upgrd
import daisy.api.backends.tecs.disk_array as disk_array
try:
import simplejson as json
except ImportError:
import json
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
CONF = cfg.CONF
upgrade_opts = [
cfg.StrOpt('max_parallel_os_upgrade_number', default=10,
help='Maximum number of hosts to upgrade the OS on at the same time.'),
]
CONF.register_opts(upgrade_opts)
tecs_state = tecs_cmn.TECS_STATE
class API(driver.DeploymentDriver):
"""
The hosts API is a RESTful web service for host data. The API
is as follows::
GET /hosts -- Returns a set of brief metadata about hosts
GET /hosts/detail -- Returns a set of detailed metadata about
hosts
HEAD /hosts/<ID> -- Return metadata about an host with id <ID>
GET /hosts/<ID> -- Return host data for host with id <ID>
POST /hosts -- Store host data and return metadata about the
newly-stored host
PUT /hosts/<ID> -- Update host metadata and/or upload host
data for a previously-reserved host
DELETE /hosts/<ID> -- Delete the host with id <ID>
"""
def __init__(self):
super(API, self).__init__()
return
def install(self, req, cluster_id):
"""
Install TECS on a cluster.
:param req: The WSGI/Webob Request object
:param cluster_id: cluster id
"""
tecs_install_task = instl.TECSInstallTask(req, cluster_id)
tecs_install_task.start()
def _get_roles_and_hosts_ip_list(self, req, cluster_id):
host_ha_list = set()
host_ip_list = set()
role_id_list = set()
hosts_id_list = []
hosts_list = []
roles = daisy_cmn.get_cluster_roles_detail(req,cluster_id)
cluster_networks = daisy_cmn.get_cluster_networks_detail(req, cluster_id)
for role in roles:
if role['deployment_backend'] != daisy_cmn.tecs_backend_name:
continue
role_hosts = daisy_cmn.get_hosts_of_role(req, role['id'])
if role_hosts:
for role_host in role_hosts:
host = daisy_cmn.get_host_detail(req, role_host['host_id'])
host_ip = tecs_cmn.get_host_network_ip(req, host, cluster_networks, 'MANAGEMENT')
if role['name'] == "CONTROLLER_HA":
host_ha_list.add(host_ip)
host_ip_list.add(host_ip)
hosts_id_list.append({host['id']:host_ip})
role_id_list.add(role['id'])
for host in hosts_id_list:
if host not in hosts_list:
hosts_list.append(host)
return (role_id_list, host_ip_list, host_ha_list, hosts_list)
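# Shape sketch of the returned values (hypothetical IPs):
#   role_id_list -> set of TECS role ids
#   host_ip_list -> {'192.168.1.10', '192.168.1.11'}  MANAGEMENT ips
#   host_ha_list -> {'192.168.1.10'}                  CONTROLLER_HA subset
#   hosts_list   -> [{host_id: '192.168.1.10'}, ...]  de-duplicated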
def _query_progress(self, req, cluster_id, action=""):
nodes_list = []
roles = daisy_cmn.get_roles_detail(req)
(role_id_list,host_ip_list,host_ha_list,hosts_list) = self._get_roles_and_hosts_ip_list(req, cluster_id)
for host in hosts_list:
node = {}
host_id = host.keys()[0]
host = daisy_cmn.get_host_detail(req, host_id)
node['id'] = host['id']
node['name'] = host['name']
if 0 == cmp("upgrade", action):
node['os-progress'] = host['os_progress']
node['os-status'] = host['os_status']
node['os-messages'] = host['messages']
if host['status'] == "with-role":
host_roles = [ role for role in roles if role['name'] in host['role'] and role['cluster_id'] == cluster_id]
if host_roles:
node['role-status'] = host_roles[0]['status']
node['role-progress'] = str(host_roles[0]['progress'])
# node['role-message'] = host_roles[0]['messages']
nodes_list.append(node)
if nodes_list:
return {'tecs_nodes': nodes_list}
else:
return {'tecs_nodes': "TECS uninstall successfully, the host has been removed from the host_roles table"}
def uninstall(self, req, cluster_id):
"""
Uninstall TECS from a cluster.
:param req: The WSGI/Webob Request object
:raises HTTPBadRequest if x-install-cluster is missing
"""
(role_id_list, host_ip_list,host_ha_list, hosts_list) = self._get_roles_and_hosts_ip_list(req, cluster_id)
if role_id_list:
if not host_ip_list:
msg = _("there is no host in cluster %s") % cluster_id
raise exception.ThreadBinException(msg)
unstl.update_progress_to_db(req, role_id_list, tecs_state['UNINSTALLING'], hosts_list)
threads = []
for host_ip in host_ip_list:
t = threading.Thread(target=unstl.thread_bin,args=(req,host_ip,role_id_list,hosts_list))
t.setDaemon(True)
t.start()
threads.append(t)
LOG.info(_("Uninstall threads have started, please waiting...."))
try:
for t in threads:
t.join()
except:
LOG.warn(_("Join uninstall thread %s failed!" % t))
else:
uninstall_failed_flag = False
for role_id in role_id_list:
role_hosts=daisy_cmn.get_hosts_of_role(req,role_id)
for role_host in role_hosts:
if role_host['status'] == tecs_state['UNINSTALL_FAILED']:
unstl.update_progress_to_db(req, role_id_list, tecs_state['UNINSTALL_FAILED'],hosts_list)
uninstall_failed_flag = True
break
if not uninstall_failed_flag:
LOG.info(_("All uninstall threads have done, set all roles status to 'init'!"))
unstl.update_progress_to_db(req, role_id_list, tecs_state['INIT'], hosts_list)
try:
(status, output) = commands.getstatusoutput('rpm -e --nodeps openstack-packstack\
openstack-packstack-puppet openstack-puppet-modules puppet')
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
def uninstall_progress(self, req, cluster_id):
return self._query_progress(req, cluster_id, "uninstall")
def upgrade(self, req, cluster_id):
"""
Upgrade TECS in a cluster.
:param req: The WSGI/Webob Request object
:raises HTTPBadRequest if x-install-cluster is missing
"""
(role_id_list,host_ip_list,host_ha_list,hosts_list) = self._get_roles_and_hosts_ip_list(req, cluster_id)
if role_id_list:
if not host_ip_list:
msg = _("there is no host in cluster %s") % cluster_id
raise exception.ThreadBinException(msg)
unreached_hosts = daisy_cmn.check_ping_hosts(host_ip_list, 1)
if unreached_hosts:
self.message = "hosts %s ping failed" % unreached_hosts
raise exception.NotFound(message=self.message)
daisy_cmn.subprocess_call('rm -rf /root/.ssh/known_hosts')
if os_handle.check_tfg_exist():
os_handle.upgrade_os(req, hosts_list)
unreached_hosts = daisy_cmn.check_ping_hosts(host_ip_list, 30)
if unreached_hosts:
self.message = "hosts %s ping failed after tfg upgrade" % unreached_hosts
raise exception.NotFound(message=self.message)
# check and get TECS version
tecs_version_pkg_file = tecs_cmn.check_and_get_tecs_version(tecs_cmn.daisy_tecs_path)
if not tecs_version_pkg_file:
self.state = tecs_state['INSTALL_FAILED']
self.message = "TECS version file not found in %s" % tecs_cmn.daisy_tecs_path
raise exception.NotFound(message=self.message)
threads = []
LOG.info(_("Begin to update TECS controller nodes, please waiting...."))
upgrd.update_progress_to_db(req, role_id_list, tecs_state['UPDATING'], hosts_list)
for host_ip in host_ha_list:
LOG.info(_("Update TECS controller node %s..." % host_ip))
rc = upgrd.thread_bin(req,role_id_list,host_ip,hosts_list)
if rc == 0:
LOG.info(_("Update TECS for %s successfully" % host_ip))
else:
LOG.info(_("Update TECS failed for %s, return %s" % (host_ip,rc)))
return
LOG.info(_("Begin to update TECS other nodes, please waiting...."))
max_parallel_upgrade_number = int(CONF.max_parallel_os_upgrade_number)
compute_ip_list = host_ip_list - host_ha_list
while compute_ip_list:
threads = []
if len(compute_ip_list) > max_parallel_upgrade_number:
upgrade_hosts = compute_ip_list[:max_parallel_upgrade_number]
compute_ip_list = compute_ip_list[max_parallel_upgrade_number:]
else:
upgrade_hosts = compute_ip_list
compute_ip_list = []
for host_ip in upgrade_hosts:
t = threading.Thread(target=upgrd.thread_bin,args=(req,role_id_list,host_ip,hosts_list))
t.setDaemon(True)
t.start()
threads.append(t)
try:
for t in threads:
t.join()
except:
LOG.warn(_("Join update thread %s failed!" % t))
for role_id in role_id_list:
role_hosts=daisy_cmn.get_hosts_of_role(req,role_id)
for role_host in role_hosts:
if (role_host['status'] == tecs_state['UPDATE_FAILED'] or
role_host['status'] == tecs_state['UPDATING']):
role_id = [role_host['role_id']]
upgrd.update_progress_to_db(req,
role_id,
tecs_state['UPDATE_FAILED'],
hosts_list)
break
elif role_host['status'] == tecs_state['ACTIVE']:
role_id = [role_host['role_id']]
upgrd.update_progress_to_db(req,
role_id,
tecs_state['ACTIVE'],
hosts_list)
def upgrade_progress(self, req, cluster_id):
return self._query_progress(req, cluster_id, "upgrade")
def export_db(self, req, cluster_id):
"""
Export daisy db data to tecs.conf and HA.conf.
:param req: The WSGI/Webob Request object
:raises HTTPBadRequest if x-install-cluster is missing
"""
(tecs_config, mgnt_ip_list) =\
instl.get_cluster_tecs_config(req, cluster_id)
config_files = {'tecs_conf':'','ha_conf':''}
tecs_install_path = "/home/tecs_install"
tecs_config_file = ''
if tecs_config:
cluster_conf_path = tecs_install_path + "/" + cluster_id
create_cluster_conf_path =\
"rm -rf %s;mkdir %s" %(cluster_conf_path, cluster_conf_path)
daisy_cmn.subprocess_call(create_cluster_conf_path)
config.update_tecs_config(tecs_config, cluster_conf_path)
get_tecs_conf = "ls %s|grep tecs.conf" % cluster_conf_path
obj = subprocess.Popen(get_tecs_conf,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdoutput, erroutput) = obj.communicate()
tecs_conf_file = ""
if stdoutput:
tecs_conf_file = stdoutput.split('\n')[0]
config_files['tecs_conf'] =\
cluster_conf_path + "/" + tecs_conf_file
get_ha_conf_cmd = "ls %s|grep HA_1.conf" % cluster_conf_path
obj = subprocess.Popen(get_ha_conf_cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdoutput, erroutput) = obj.communicate()
ha_conf_file = ""
if stdoutput:
ha_conf_file = stdoutput.split('\n')[0]
config_files['ha_conf'] =\
cluster_conf_path + "/" + ha_conf_file
else:
LOG.info(_("No TECS config files generated."))
return config_files
def update_disk_array(self, req, cluster_id):
(share_disk_info, volume_disk_info) =\
disk_array.get_disk_array_info(req, cluster_id)
(controller_ha_nodes, computer_ips) =\
disk_array.get_ha_and_compute_ips(req, cluster_id)
all_nodes_ip = computer_ips + controller_ha_nodes.keys()
if all_nodes_ip:
compute_error_msg =\
disk_array.config_compute_multipath(all_nodes_ip)
if compute_error_msg:
return compute_error_msg
else:
LOG.info(_("Config Disk Array multipath successfully"))
if share_disk_info:
ha_error_msg =\
disk_array.config_ha_share_disk(share_disk_info,
controller_ha_nodes)
if ha_error_msg:
return ha_error_msg
else:
LOG.info(_("Config Disk Array for HA nodes successfully"))
if volume_disk_info:
cinder_error_msg =\
disk_array.config_ha_cinder_volume(volume_disk_info,
controller_ha_nodes.keys())
if cinder_error_msg:
return cinder_error_msg
else:
LOG.info(_("Config cinder volume for HA nodes successfully"))
return 'update successfully'

View File

@ -0,0 +1,364 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/install endpoint for tecs API
"""
import os
import copy
import subprocess
import time
import re
import traceback
import webob.exc
from oslo_config import cfg
from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPForbidden
from threading import Thread
from daisy import i18n
from daisy import notifier
from daisy.api import policy
import daisy.api.v1
from daisy.common import exception
import daisy.registry.client.v1.api as registry
import daisy.api.backends.common as daisy_cmn
try:
import simplejson as json
except ImportError:
import json
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
daisy_tecs_path = '/var/lib/daisy/tecs/'
TECS_STATE = {
'INIT' : 'init',
'INSTALLING' : 'installing',
'ACTIVE' : 'active',
'INSTALL_FAILED': 'install-failed',
'UNINSTALLING': 'uninstalling',
'UNINSTALL_FAILED': 'uninstall-failed',
'UPDATING': 'updating',
'UPDATE_FAILED': 'update-failed',
}
def _get_cluster_network(cluster_networks, network_name):
network = [cn for cn in cluster_networks
if cn['name'] in network_name]
if not network or not network[0]:
msg = "network %s is not exist" % (network_name)
raise exception.InvalidNetworkConfig(msg)
else:
return network[0]
def get_host_interface_by_network(host_detail, network_name):
host_detail_info = copy.deepcopy(host_detail)
interface_list = [hi for hi in host_detail_info['interfaces']
for assigned_network in hi['assigned_networks']
if assigned_network and network_name == assigned_network['name']]
interface = {}
if interface_list:
interface = interface_list[0]
if not interface and 'MANAGEMENT' == network_name:
msg = "network %s of host %s is not exist" % (network_name, host_detail_info['id'])
raise exception.InvalidNetworkConfig(msg)
return interface
def get_host_network_ip(req, host_detail, cluster_networks, network_name):
interface_network_ip = ''
host_interface = get_host_interface_by_network(host_detail, network_name)
if host_interface:
network = _get_cluster_network(cluster_networks, network_name)
assigned_network = daisy_cmn.get_assigned_network(req,
host_interface['id'],
network['id'])
interface_network_ip = assigned_network['ip']
if not interface_network_ip and 'MANAGEMENT' == network_name :
msg = "%s network ip of host %s can't be empty" % (network_name, host_detail['id'])
raise exception.InvalidNetworkConfig(msg)
return interface_network_ip
def get_storage_name_ip_dict(req, cluster_id, network_type):
name_ip_list = []
ip_list = []
roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id)
cluster_networks = daisy_cmn.get_cluster_networks_detail(req, cluster_id)
networks_list = [network for network in cluster_networks
if network['network_type'] == network_type]
networks_name_list = [network['name'] for network in networks_list]
for role in roles:
role_hosts = daisy_cmn.get_hosts_of_role(req, role['id'])
for role_host in role_hosts:
host_detail = daisy_cmn.get_host_detail(req, role_host['host_id'])
for network_name in networks_name_list:
ip = get_host_network_ip(req, host_detail, cluster_networks,
network_name)
name_ip_dict = {}
if ip and ip not in ip_list:
ip_list.append(ip)
name_ip_dict.update({host_detail['name'] + '.' +
network_name: ip})
name_ip_list.append(name_ip_dict)
return name_ip_list
def get_network_netmask(cluster_networks, network_name):
network = _get_cluster_network(cluster_networks, network_name)
cidr = network['cidr']
if not cidr:
msg = "cidr of network %s is not exist" % (network_name)
raise exception.InvalidNetworkConfig(msg)
netmask = daisy_cmn.cidr_to_netmask(cidr)
if not netmask:
msg = "netmask of network %s is not exist" % (network_name)
raise exception.InvalidNetworkConfig(msg)
return netmask
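# Worked example (assuming daisy_cmn.cidr_to_netmask does the usual CIDR
# conversion): a network with cidr '192.168.1.0/24' yields '255.255.255.0'.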
# every host only have one gateway
def get_network_gateway(cluster_networks, network_name):
network = _get_cluster_network(cluster_networks, network_name)
gateway = network['gateway']
if not gateway and 'MANAGEMENT' == network_name:
msg = "gateway of network %s can't be empty" % (network_name)
raise exception.InvalidNetworkConfig(msg)
return gateway
def get_mngt_network_vlan_id(cluster_networks):
mgnt_vlan_id = ""
management_network = [network for network in cluster_networks if network['network_type'] == 'MANAGEMENT']
if (not management_network or
not management_network[0] or
not management_network[0].has_key('vlan_id')):
msg = "can't get management network vlan id"
raise exception.InvalidNetworkConfig(msg)
else:
mgnt_vlan_id = management_network[0]['vlan_id']
return mgnt_vlan_id
def get_network_vlan_id(cluster_networks, network_type):
vlan_id = ""
general_network = [network for network in cluster_networks
if network['network_type'] == network_type]
if (not general_network or not general_network[0] or
not general_network[0].has_key('vlan_id')):
msg = "can't get %s network vlan id" % network_type
raise exception.InvalidNetworkConfig(msg)
else:
vlan_id = general_network[0]['vlan_id']
return vlan_id
def sort_interfaces_by_pci(host_detail):
"""
Sort interfaces by PCI segment; if the interface type is bond,
use the PCI of its first member NIC. This works around the
name-length limit of OVS virtual ports: if a port name is longer
than 15 characters, creating the port fails.
:param host_detail: host detail info including its interfaces
:return:
"""
interfaces = eval(host_detail.get('interfaces', None)) \
if isinstance(host_detail, unicode) else host_detail.get('interfaces', None)
if not interfaces:
LOG.info("This host don't have /interfaces info.")
return host_detail
tmp_interfaces = copy.deepcopy(interfaces)
if not [interface for interface in tmp_interfaces
if interface.get('name', None) and len(interface['name']) > 8]:
LOG.info("The interfaces name of host is all less than 9 character, no need sort.")
return host_detail
# add a pci segment to each bond nic; its pci equals the first member nic's pci
slaves_name_list = []
for interface in tmp_interfaces:
if interface.get('type', None) == "bond" and \
interface.get('slave1', None) and interface.get('slave2', None):
slaves_name_list.append(interface['slave1'])
slaves_name_list.append(interface['slave2'])
first_member_nic_name = interface['slave1']
tmp_pci = [interface_tmp['pci']
for interface_tmp in tmp_interfaces
if interface_tmp.get('name', None) and
interface_tmp.get('pci', None) and
interface_tmp['name'] == first_member_nic_name]
if len(tmp_pci) != 1:
LOG.error("This host have two nics with same pci.")
continue
interface['pci'] = tmp_pci[0]
tmp_interfaces = [interface for interface in tmp_interfaces
if interface.get('name', None) and
interface['name'] not in slaves_name_list]
tmp_interfaces = sorted(tmp_interfaces, key = lambda interface: interface['pci'])
for index in range(0, len(tmp_interfaces)):
for interface in interfaces:
if interface['name'] != tmp_interfaces[index]['name']:
continue
interface['name'] = "b" + str(index) if interface['type'] == "bond" else "e" + str(index)
tmp_host_detail = copy.deepcopy(host_detail)
tmp_host_detail.update({'interfaces': interfaces})
return tmp_host_detail
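# Illustrative rename (hypothetical PCIs): given eth3 (pci 0000:02) and
# bond1 (slave1 eth1 pci 0000:01, slave2 eth2), sorting by pci renames
# bond1 -> 'b0' and eth3 -> 'e1', keeping every ovs port name short.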
def check_and_get_tecs_version(daisy_tecs_pkg_path):
tecs_version_pkg_file = ""
get_tecs_version_pkg = "ls %s| grep ^ZXTECS.*\.bin$" % daisy_tecs_pkg_path
obj = subprocess.Popen(get_tecs_version_pkg,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdoutput, erroutput) = obj.communicate()
if stdoutput:
tecs_version_pkg_name = stdoutput.split('\n')[0]
tecs_version_pkg_file = daisy_tecs_pkg_path + tecs_version_pkg_name
chmod_for_tecs_version = 'chmod +x %s' % tecs_version_pkg_file
daisy_cmn.subprocess_call(chmod_for_tecs_version)
return tecs_version_pkg_file
def get_service_disk_list(req, params):
try:
service_disks = registry.list_service_disk_metadata(req.context, **params)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return service_disks
def get_cinder_volume_list(req, params):
try:
cinder_volumes = registry.list_cinder_volume_metadata(req.context, **params)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return cinder_volumes
def get_network_configuration_rpm_name():
cmd = "ls %s | grep ^network-configuration.*\.rpm" % daisy_tecs_path
try:
network_rpm_name = subprocess.check_output(
cmd, shell=True, stderr=subprocess.STDOUT).split('\n')[0]
except subprocess.CalledProcessError:
msg = _("Get network-configuration rpm name by subprocess failed!")
raise exception.SubprocessCmdFailed(message=msg)
return network_rpm_name
def run_scrip(script, ip=None, password=None):
script = "\n".join(script)
_PIPE = subprocess.PIPE
if ip:
cmd = ["sshpass", "-p", "%s" % password,
"ssh", "-o StrictHostKeyChecking=no",
"%s" % ip, "bash -x"]
else:
cmd = ["bash", "-x"]
environ = os.environ
environ['LANG'] = 'en_US.UTF8'
obj = subprocess.Popen(cmd, stdin=_PIPE, stdout=_PIPE, stderr=_PIPE,
close_fds=True, shell=False, env=environ)
script = "function t(){ exit $? ; } \n trap t ERR \n" + script
out, err = obj.communicate(script)
return out, err
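# Usage sketch (hypothetical host/password):
#   out, err = run_scrip(['rpm -qa | grep tecs'], ip='10.0.0.5',
#                        password='ossdbg1')
# The injected 'trap t ERR' prologue makes the shell exit on the first
# failing command.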
class TecsShellExector(object):
"""
Class for config tasks run before installing the TECS bin.
"""
def __init__(self, mgnt_ip, task_type, params={}):
self.task_type = task_type
self.mgnt_ip = mgnt_ip
self.params = params
self.clush_cmd = ""
self.rpm_name = get_network_configuration_rpm_name()
self.NETCFG_RPM_PATH = daisy_tecs_path + self.rpm_name
self.oper_type = {
'install_rpm' : self._install_netcfg_rpm,
'uninstall_rpm' : self._uninstall_netcfg_rpm,
'update_rpm' : self._update_netcfg_rpm,
}
self.oper_shell = {
'CMD_SSHPASS_PRE': "sshpass -p ossdbg1 %(ssh_ip)s %(cmd)s",
'CMD_RPM_UNINSTALL': "rpm -e network-configuration",
'CMD_RPM_INSTALL': "rpm -i /home/%(rpm)s" % {'rpm': self.rpm_name},
'CMD_RPM_UPDATE': "rpm -U /home/%(rpm)s" % {'rpm': self.rpm_name},
'CMD_RPM_SCP': "scp -o StrictHostKeyChecking=no %(path)s root@%(ssh_ip)s:/home" %
{'path': self.NETCFG_RPM_PATH, 'ssh_ip': mgnt_ip}
}
LOG.info(_("<<<Network configuration rpm is %s>>>" % self.rpm_name))
self._execute()
def _uninstall_netcfg_rpm(self):
self.clush_cmd = self.oper_shell['CMD_SSHPASS_PRE'] % \
{"ssh_ip":"ssh -o StrictHostKeyChecking=no " + self.mgnt_ip, "cmd":self.oper_shell['CMD_RPM_UNINSTALL']}
subprocess.check_output(self.clush_cmd, shell = True, stderr=subprocess.STDOUT)
def _update_netcfg_rpm(self):
self.clush_cmd = self.oper_shell['CMD_SSHPASS_PRE'] % \
{"ssh_ip":"ssh -o StrictHostKeyChecking=no " + self.mgnt_ip, "cmd":self.oper_shell['CMD_RPM_UPDATE']}
subprocess.check_output(self.clush_cmd, shell = True, stderr=subprocess.STDOUT)
def _install_netcfg_rpm(self):
if not os.path.exists(self.NETCFG_RPM_PATH):
LOG.error(_("<<<Rpm %s not exist>>>" % self.NETCFG_RPM_PATH))
return
self.clush_cmd = "%s;%s" % \
(self.oper_shell['CMD_SSHPASS_PRE'] %
{"ssh_ip":"", "cmd":self.oper_shell['CMD_RPM_SCP']}, \
self.oper_shell['CMD_SSHPASS_PRE'] %
{"ssh_ip":"ssh -o StrictHostKeyChecking=no " + self.mgnt_ip, "cmd":self.oper_shell['CMD_RPM_INSTALL']})
subprocess.check_output(self.clush_cmd, shell = True, stderr=subprocess.STDOUT)
def _execute(self):
try:
if not self.task_type or not self.mgnt_ip :
LOG.error(_("<<<TecsShellExector::execute, input params invalid on %s!>>>" % self.mgnt_ip, ))
return
self.oper_type[self.task_type]()
except subprocess.CalledProcessError as e:
LOG.warn(_("<<<TecsShellExector::execute:Execute command failed on %s! Reason:%s>>>" % (self.mgnt_ip, e.output.strip())))
except Exception as e:
LOG.exception(_(e.message))
else:
LOG.info(_("<<<TecsShellExector::execute:Execute command:%s,successful on %s!>>>" % (self.clush_cmd, self.mgnt_ip)))

View File

@ -0,0 +1,832 @@
# -*- coding: utf-8 -*-
import os
import re
import commands
import types
import subprocess
from oslo_log import log as logging
from ConfigParser import ConfigParser
from daisy.common import exception
from daisy import i18n
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
service_map = {
'lb': 'haproxy',
'mongodb': 'mongod',
'ha': '',
'mariadb': 'mariadb',
'amqp': 'rabbitmq-server',
'ceilometer-api':'openstack-ceilometer-api',
'ceilometer-collector':'openstack-ceilometer-collector,openstack-ceilometer-mend',
'ceilometer-central':'openstack-ceilometer-central',
'ceilometer-notification':'openstack-ceilometer-notification',
'ceilometer-alarm':'openstack-ceilometer-alarm-evaluator,openstack-ceilometer-alarm-notifier',
'heat-api': 'openstack-heat-api',
'heat-api-cfn': 'openstack-heat-api-cfn',
'heat-engine': 'openstack-heat-engine',
'ironic': 'openstack-ironic-api,openstack-ironic-conductor',
'horizon': 'httpd',
'keystone': 'openstack-keystone',
'glance': 'openstack-glance-api,openstack-glance-registry',
'cinder-volume': 'openstack-cinder-volume',
'cinder-scheduler': 'openstack-cinder-scheduler',
'cinder-api': 'openstack-cinder-api',
'neutron-metadata': 'neutron-metadata-agent',
'neutron-lbaas': 'neutron-lbaas-agent',
'neutron-dhcp': 'neutron-dhcp-agent',
'neutron-server': 'neutron-server',
'neutron-l3': 'neutron-l3-agent',
'compute': 'openstack-nova-compute',
'nova-cert': 'openstack-nova-cert',
'nova-sched': 'openstack-nova-scheduler',
'nova-vncproxy': 'openstack-nova-novncproxy,openstack-nova-consoleauth',
'nova-conductor': 'openstack-nova-conductor',
'nova-api': 'openstack-nova-api',
'nova-cells': 'openstack-nova-cells'
}
def add_service_with_host(services, name, host):
if name not in services:
services[name] = []
services[name].append(host)
def add_service_with_hosts(services, name, hosts):
if name not in services:
services[name] = []
for h in hosts:
services[name].append(h['management']['ip'])
def test_ping(ping_src_nic, ping_desc_ips):
ping_cmd = 'fping'
for ip in set(ping_desc_ips):
ping_cmd = ping_cmd + ' -I ' + ping_src_nic + ' ' + ip
obj = subprocess.Popen(ping_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdoutput, erroutput) = obj.communicate()
_returncode = obj.returncode
if _returncode == 0 or _returncode == 1:
ping_result = stdoutput.split('\n')
unreachable_hosts = [result.split()[0] for result in ping_result if result and result.split()[2] != 'alive']
else:
msg = "ping failed beaceuse there is invlid ip in %s" % ping_desc_ips
raise exception.InvalidIP(msg)
return unreachable_hosts
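# fping output is parsed line by line; e.g. (hypothetical ips):
#   '10.0.0.5 is alive'        -> split()[2] == 'alive', reachable
#   '10.0.0.6 is unreachable'  -> collected into unreachable_hosts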
def get_local_deployment_ip(tecs_deployment_ip):
def _get_ip_segment(full_ip):
if not full_ip:
return None
match = re.search('([0-9]{1,3}\.){3}', full_ip)
if match:
return match.group()
else:
print "can't find ip segment"
return None
(status, output) = commands.getstatusoutput('ifconfig')
netcard_pattern = re.compile('\S*: ')
ip_str = '([0-9]{1,3}\.){3}[0-9]{1,3}'
ip_pattern = re.compile('(inet %s)' % ip_str)
pattern = re.compile(ip_str)
nic_ip = {}
for netcard in re.finditer(netcard_pattern, str(output)):
nic_name = netcard.group().split(': ')[0]
if nic_name == "lo":
continue
ifconfig_nic_cmd = "ifconfig %s" % nic_name
(status, output) = commands.getstatusoutput(ifconfig_nic_cmd)
if status:
continue
ip = pattern.search(str(output))
if ip and ip.group() != "127.0.0.1":
nic_ip[nic_name] = ip.group()
deployment_ip = ''
ip_segment = _get_ip_segment(tecs_deployment_ip)
for nic in nic_ip.keys():
if ip_segment == _get_ip_segment(nic_ip[nic]):
deployment_ip = nic_ip[nic]
break
if not deployment_ip:
for nic,ip in nic_ip.items():
if not test_ping(nic,[tecs_deployment_ip]):
deployment_ip = nic_ip[nic]
break
return deployment_ip
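# Matching sketch (hypothetical addresses): for tecs_deployment_ip
# '10.43.203.132' the segment '10.43.203.' is compared with each local
# nic ip; a nic holding '10.43.203.21' wins, otherwise the first nic
# that can fping the deployment ip is used.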
class AnalsyConfig(object):
def __init__(self, all_configs):
self.all_configs = all_configs
self.services = {}
self.components = []
self.modes = {}
# self.ha_conf = {}
self.services_in_component = {}
# self.heartbeat = {}
self.lb_components = []
self.heartbeats = [[], [], []]
self.lb_vip = ''
self.ha_vip = ''
self.db_vip = ''
self.glance_vip = ''
self.public_vip = ''
self.share_disk_services = []
self.ha_conf = {}
self.child_cell_dict = {}
self.ha_master_host = {}
def get_heartbeats(self, host_interfaces):
for network in host_interfaces:
#if network.has_key("deployment") and network["deployment"]["ip"]:
# self.heartbeats[0].append(network["deployment"]["ip"])
self.heartbeats[0].append(network["management"]["ip"])
if network.has_key("storage") and network["storage"]["ip"]:
self.heartbeats[1].append(network["storage"]["ip"])
#delete empty heartbeat line
if not self.heartbeats[0]:
self.heartbeats[0] = self.heartbeats[1]
self.heartbeats[1] = self.heartbeats[2]
if not self.heartbeats[1]:
self.heartbeats[1] = self.heartbeats[2]
# remove repeated ip
if set(self.heartbeats[1]) == set(self.heartbeats[0]):
self.heartbeats[1] = []
if set(self.heartbeats[2]) != set(self.heartbeats[0]):
self.heartbeats[1] = self.heartbeats[2]
self.heartbeats[2] = []
if set(self.heartbeats[2]) == set(self.heartbeats[0]) or set(self.heartbeats[2]) == set(self.heartbeats[1]):
self.heartbeats[2] = []
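# Dedup sketch: if every host's storage ip equals its management ip,
# heartbeats collapses to [[mgmt ips], [], []] -- only one heartbeat
# line is written to HA.conf (assuming no third ip list was collected).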
def prepare_child_cell(self, child_cell_name, configs):
cell_compute_hosts = str()
cell_compute_name = child_cell_name[11:] + '_COMPUTER'
for role_name, role_configs in self.all_configs.items():
if role_name == cell_compute_name:
cell_compute_host = [
host_interface['management']['ip']
for host_interface in role_configs['host_interfaces']]
cell_compute_hosts = ",".join(cell_compute_host)
self.all_configs.pop(role_name)
child_cell_host = configs['host_interfaces'][0]['management']['ip']
self.child_cell_dict[repr(child_cell_host).strip("u'")] \
= repr(cell_compute_hosts).strip("u'")
add_service_with_host(self.services, 'CONFIG_CHILD_CELL_DICT',
str(self.child_cell_dict))
def prepare_ha_lb(self, role_configs, is_ha, is_lb):
if is_lb:
self.ha_master_host['ip'] = role_configs['host_interfaces'][0]['management']['ip']
self.ha_master_host['hostname'] = role_configs['host_interfaces'][0]['name']
self.components.append('CONFIG_LB_INSTALL')
add_service_with_hosts(self.services,
'CONFIG_LB_BACKEND_HOSTS',
role_configs['host_interfaces'])
self.lb_vip = role_configs['vip']
if is_ha:
self.ha_vip = role_configs['vip']
self.share_disk_services += role_configs['share_disk_services']
local_deployment_ip = get_local_deployment_ip(
role_configs['host_interfaces'][0]['management']['ip'])
if local_deployment_ip:
add_service_with_host(
self.services, 'CONFIG_REPO',
'http://'+local_deployment_ip+'/tecs_install/')
else:
msg = "can't find ip for yum repo"
raise exception.InvalidNetworkConfig(msg)
self.components.append('CONFIG_HA_INSTALL')
add_service_with_host(
self.services, 'CONFIG_HA_HOST',
role_configs['host_interfaces'][0]['management']['ip'])
add_service_with_hosts(self.services, 'CONFIG_HA_HOSTS',
role_configs['host_interfaces'])
ntp_host = role_configs['ntp_server'] \
if role_configs['ntp_server'] else role_configs['vip']
add_service_with_host(self.services, 'CONFIG_NTP_SERVERS',
ntp_host)
if role_configs['db_vip']:
self.db_vip = role_configs['db_vip']
add_service_with_host(self.services, 'CONFIG_MARIADB_HOST', role_configs['db_vip'])
else:
self.db_vip = role_configs['vip']
add_service_with_host(self.services, 'CONFIG_MARIADB_HOST', role_configs['vip'])
if role_configs['glance_vip']:
self.glance_vip = role_configs['glance_vip']
add_service_with_host(self.services, 'CONFIG_GLANCE_HOST', role_configs['glance_vip'])
else:
self.glance_vip = role_configs['vip']
add_service_with_host(self.services, 'CONFIG_GLANCE_HOST', role_configs['vip'])
if role_configs['public_vip']:
vip = role_configs['public_vip']
self.public_vip = role_configs['public_vip']
else:
vip = role_configs['vip']
self.public_vip = vip
add_service_with_host(self.services,
'CONFIG_NOVA_VNCPROXY_HOST', vip)
add_service_with_host(self.services, 'CONFIG_PUBLIC_IP', vip)
add_service_with_host(self.services, 'CONFIG_HORIZON_HOST', vip)
add_service_with_host(self.services, 'CONFIG_ADMIN_IP', vip)
add_service_with_host(self.services, 'CONFIG_INTERNAL_IP', vip)
def prepare_role_service(self, is_ha, service, role_configs):
host_key_name = "CONFIG_%s_HOST" % service
hosts_key_name = "CONFIG_%s_HOSTS" % service
add_service_with_hosts(self.services, hosts_key_name,
role_configs['host_interfaces'])
if service != 'LB' and service not in ['NOVA_VNCPROXY', 'MARIADB', 'GLANCE', 'HORIZON']:
add_service_with_host(self.services, host_key_name,
role_configs['vip'])
if is_ha and service == 'LB':
add_service_with_hosts(
self.services, 'CONFIG_LB_FRONTEND_HOSTS',
role_configs['host_interfaces'])
def prepare_mode(self, is_ha, is_lb, service):
mode_key = "CONFIG_%s_INSTALL_MODE" % service
if is_ha:
self.modes.update({mode_key: 'HA'})
elif is_lb:
self.modes.update({mode_key: 'LB'})
# special process
if service == 'GLANCE':
self.modes.update(
{'CONFIG_GLANCE_API_INSTALL_MODE': 'LB'})
self.modes.update(
{'CONFIG_GLANCE_REGISTRY_INSTALL_MODE': 'LB'})
#if s == 'HEAT':
# self.modes.update({'CONFIG_HEAT_API_INSTALL_MODE': 'LB'})
# self.modes.update({'CONFIG_HEAT_API_CFN_INSTALL_MODE': 'LB'})
#if s == 'CEILOMETER':
# self.modes.update({'CONFIG_CEILOMETER_API_INSTALL_MODE': 'LB'})
if service == 'IRONIC':
self.modes.update(
{'CONFIG_IRONIC_API_INSTALL_MODE': 'LB'})
else:
self.modes.update({mode_key: 'None'})
def prepare_services_in_component(self, component, service, role_configs):
if component not in self.services_in_component.keys():
self.services_in_component[component] = {}
self.services_in_component[component]["service"] = []
self.services_in_component[component]["service"].append(service_map[service])
if component == "horizon":
self.services_in_component[component]["fip"] = self.public_vip
elif component == "database":
self.services_in_component[component]["fip"] = self.db_vip
elif component == "glance":
self.services_in_component[component]["fip"] = self.glance_vip
else:
self.services_in_component[component]["fip"] = role_configs["vip"]
network_name = ''
if component in ['horizon'] and role_configs["host_interfaces"][0].has_key('public'):
network_name = 'public'
else:
network_name = 'management'
self.services_in_component[component]["netmask"] = \
role_configs["host_interfaces"][0][network_name]["netmask"]
self.services_in_component[component]["nic_name"] = \
role_configs["host_interfaces"][0][network_name]["name"]
if component == 'loadbalance' and \
self.all_configs.has_key('CONTROLLER_LB') and \
self.all_configs['CONTROLLER_LB']['vip']:
self.services_in_component[component]["fip"] = \
self.all_configs['CONTROLLER_LB']['vip']
def prepare_amqp_mariadb(self):
if self.lb_vip:
amqp_vip = ''
if self.modes['CONFIG_AMQP_INSTALL_MODE'] == 'LB':
amqp_vip = self.lb_vip
add_service_with_host(
self.services,
'CONFIG_AMQP_CLUSTER_MASTER_NODE_IP',
self.ha_master_host['ip'])
add_service_with_host(
self.services, 'CONFIG_AMQP_CLUSTER_MASTER_NODE_HOSTNAME',
self.ha_master_host['hostname'])
else:
amqp_vip = self.ha_vip
amqp_dict = "{'%s':'%s,%s,%s,%s'}" % (amqp_vip, self.ha_vip,
self.lb_vip, self.glance_vip, self.public_vip)
mariadb_dict = "{'%s':'%s,%s,%s,%s'}" % (self.db_vip, self.ha_vip,
self.lb_vip, self.glance_vip, self.public_vip)
add_service_with_host(self.services, 'CONFIG_LB_HOST', self.lb_vip)
elif self.ha_vip:
amqp_dict = "{'%s':'%s,%s,%s'}" % (self.ha_vip, self.ha_vip,
self.glance_vip, self.public_vip)
mariadb_dict = "{'%s':'%s,%s,%s'}" % (self.db_vip, self.ha_vip,
self.glance_vip, self.public_vip)
else:
amqp_dict = "{}"
mariadb_dict = "{}"
if self.lb_vip or self.ha_vip:
add_service_with_host(self.services, 'CONFIG_MARIADB_DICT',
mariadb_dict)
add_service_with_host(self.services, 'CONFIG_AMQP_DICT', amqp_dict)
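# The resulting dict strings look like (hypothetical vips):
#   CONFIG_AMQP_DICT    = "{'192.160.0.225':'192.160.0.225,192.160.0.226,...'}"
#   CONFIG_MARIADB_DICT = "{'<db_vip>':'<ha_vip>,<lb_vip>,<glance_vip>,<public_vip>'}"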
def prepare(self):
for role_name, role_configs in self.all_configs.items():
if role_name == "OTHER":
continue
is_ha = re.match(".*_HA$", role_name) is not None
is_lb = re.match(".*_LB$", role_name) is not None
is_child_cell = re.match(".*_CHILD_CELL.*", role_name) is not None
if is_child_cell:
self.prepare_child_cell(role_name, role_configs)
continue
self.prepare_ha_lb(role_configs, is_ha, is_lb)
for service, component in role_configs['services'].items():
s = service.strip().upper().replace('-', '_')
self.prepare_role_service(is_ha, s, role_configs)
self.prepare_mode(is_ha, is_lb, s)
if is_lb:
self.lb_components.append(component)
c = "CONFIG_%s_INSTALL" % \
component.strip().upper().replace('-', '_')
self.components.append(c)
if is_ha:
if component == 'log':
continue
self.prepare_services_in_component(component, service,
role_configs)
if is_ha:
self.get_heartbeats(role_configs['host_interfaces'])
self.prepare_amqp_mariadb()
def update_conf_with_services(self, tecs):
for s in self.services:
if tecs.has_option("general", s):
print "%s is update" % s
if type(self.services[s]) is types.ListType:
if self.services[s] and not self.services[s][0]:
return
tecs.set("general", s, ','.join(self.services[s]))
else:
print "service %s is not exit in conf file" % s
def update_conf_with_components(self, tecs):
for s in self.components:
if tecs.has_option("general", s):
print "Component %s is update" % s
tecs.set("general", s, 'y')
else:
print "component %s is not exit in conf file" % s
def update_conf_with_modes(self, tecs):
for k, v in self.modes.items():
if tecs.has_option("general", k):
print "mode %s is update" % k
tecs.set("general", k, v)
else:
print "mode %s is not exit in conf file" % k
def update_tecs_conf(self, tecs):
self.update_conf_with_services(tecs)
self.update_conf_with_components(tecs)
self.update_conf_with_modes(tecs)
def update_ha_conf(self, ha, ha_nic_name, tecs=None):
print "heartbeat line is update"
heart_beat_list = []
if self.all_configs['OTHER'].get('dns_config'):
for heartbeat in self.heartbeats:
tmp_list = []
for name_ip in self.all_configs['OTHER']['dns_config']:
for tmp in heartbeat:
if tmp == name_ip.keys()[0]:
tmp_list.append(name_ip.values()[0])
heart_beat_list.append(tmp_list)
self.heartbeats = heart_beat_list
for k, v in self.services_in_component.items():
for name_ip in self.all_configs['OTHER']['dns_config']:
if v['fip'] == name_ip.keys()[0]:
v['fip'] = name_ip.values()[0]
ha.set('DEFAULT', 'heartbeat_link1', ','.join(self.heartbeats[0]))
ha.set('DEFAULT', 'heartbeat_link2', ','.join(self.heartbeats[1]))
ha.set('DEFAULT', 'heartbeat_link3', ','.join(self.heartbeats[2]))
ha.set('DEFAULT', 'components', ','.join(self.services_in_component.keys()))
for k, v in self.services_in_component.items():
print "component %s is update" % k
ha.set('DEFAULT', k, ','.join(v['service']))
if k == 'glance':
if 'glance' in self.share_disk_services:
ha.set('DEFAULT', 'glance_device_type', 'iscsi')
ha.set('DEFAULT', 'glance_device', '/dev/mapper/vg_glance-lv_glance')
ha.set('DEFAULT', 'glance_fs_type', 'ext4')
else:
ha.set('DEFAULT', 'glance_device_type', 'drbd')
ha.set('DEFAULT', 'glance_device', '/dev/vg_data/lv_glance')
ha.set('DEFAULT', 'glance_fs_type', 'ext4')
# mariadb does not support db clustering yet, so shared disk is not supported.
if k == "database":
if 'db' in self.share_disk_services:
ha.set('DEFAULT', 'database_device', '/dev/mapper/vg_db-lv_db')
ha.set('DEFAULT', 'database_fs_type', 'ext4')
if "mongod" in v['service']:
if 'mongodb' in self.share_disk_services:
ha.set('DEFAULT', 'mongod_device', '/dev/mapper/vg_mongodb-lv_mongodb')
ha.set('DEFAULT', 'mongod_fs_type', 'ext4')
ha.set('DEFAULT', 'mongod_local', '')
if tecs:
tecs.set("general", 'CONFIG_HA_INSTALL_MONGODB_LOCAL', 'n')
else:
ha.set('DEFAULT', 'mongod_fs_type', 'ext4')
ha.set('DEFAULT', 'mongod_local', 'yes')
if tecs:
tecs.set("general", 'CONFIG_HA_INSTALL_MONGODB_LOCAL', 'y')
if k not in self.lb_components:
# if "bond" in v['nic_name']:
# v['nic_name'] = "vport"
ha.set('DEFAULT', k+'_fip', v['fip'])
if ha_nic_name and k not in ['horizon']:
nic_name = ha_nic_name
else:
nic_name = v['nic_name']
ha.set('DEFAULT', k+'_nic', nic_name)
cidr_netmask = reduce(lambda x, y: x + y,
[bin(int(i)).count('1') for i in v['netmask'].split('.')])
ha.set('DEFAULT', k+'_netmask', cidr_netmask)
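# The reduce() above counts set bits per octet to turn a dotted netmask
# into a CIDR prefix, e.g. '255.255.254.0' -> 8 + 8 + 7 + 0 -> 23.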
def update_conf(tecs, key, value):
tecs.set("general", key, value)
def get_conf(tecs_conf_file, **kwargs):
result = {}
if not kwargs:
return result
tecs = ConfigParser()
tecs.optionxform = str
tecs.read(tecs_conf_file)
result = {key : tecs.get("general", kwargs.get(key, None))
for key in kwargs.keys()
if tecs.has_option("general", kwargs.get(key, None))}
return result
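# Usage sketch (hypothetical option value): reading the LB vip back from a
# generated tecs.conf:
#   get_conf('/home/tecs_install/<cluster>/tecs.conf',
#            lb_vip='CONFIG_LB_HOST')  ->  {'lb_vip': '192.160.0.226'}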
def _get_physnics_info(network_type, phynics):
# bond1(active-backup;lacp;eth1-eth2)
# eth0
# phynet1:eth0
# phynet1:bond1(active-backup;lacp;eth1-eth2), phynet2:eth3
phynics_info = []
if not phynics:
return
phynic_info = phynics.split("(")
if 2 == len(phynic_info):
phynic_info = phynic_info[1][0:-1].split(";")
phynics_info.extend(phynic_info[-1].split('-'))
else:
phynic_info = phynic_info[0].split(":")
if network_type == 'vlan':
phynics_info.append(phynic_info[1])
else:
phynics_info.append(phynic_info[0])
return phynics_info
def get_physnics_info(network_type, phynics):
# bond1(active-backup;lacp;eth1-eth2)
# phynet1:eth0
# phynet1:bond1(active-backup;lacp;eth1-eth2), phynet1:eth3
phynics_info = []
if network_type == 'vxlan':
phynics_info.extend(_get_physnics_info(network_type, phynics))
elif network_type == 'vlan':
phynics = phynics.split(',')
for phynic_info in phynics:
phynics_info.extend(_get_physnics_info(network_type, phynic_info))
return phynics_info
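# Parsing examples (inputs from the comments above):
#   _get_physnics_info('vlan',  'bond1(active-backup;lacp;eth1-eth2)')
#       -> ['eth1', 'eth2']
#   _get_physnics_info('vlan',  'phynet1:eth0')  -> ['eth0']
#   _get_physnics_info('vxlan', 'eth0')          -> ['eth0']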
def update_conf_with_zenic(tecs, zenic_configs):
zenic_vip = zenic_configs.get('vip')
if not zenic_vip:
return
auth = zenic_configs.get('auth')
if not auth:
auth = 'restconf:LkfhRDGIPyGzbWGM2uAaNQ=='
update_conf(tecs, 'CONFIG_ZENIC_USER_AND_PW', auth)
update_conf(tecs, 'CONFIG_ZENIC_API_NODE', '%s:8181' % zenic_vip)
ml2_drivers = tecs.get(
"general", 'CONFIG_NEUTRON_ML2_MECHANISM_DRIVERS').split(',')
ml2_drivers.extend(['proxydriver'])
update_conf(
tecs, 'CONFIG_NEUTRON_ML2_MECHANISM_DRIVERS', ','.join(ml2_drivers))
class DvsDaisyConfig(object):
def __init__(self, tecs, networks_config):
self.tecs = tecs
self.networks_config = networks_config
# common
self.dvs_network_type = []
self.dvs_vswitch_type = {}
self.dvs_physnics = []
self.enable_sdn = False
# for vlan
self.dvs_physical_mappings = []
self.dvs_bridge_mappings = []
# for vxlan
self.dvs_vtep_ip_ranges = []
self.dvs_vxlan_info = ''
self.dvs_domain_id = {}
def config_tecs_for_dvs(self):
self._get_dvs_config()
self._set_dvs_config()
def _get_dvs_config(self):
network = self.networks_config
vswitch_type = network.get('vswitch_type')
if not vswitch_type:
return
self.dvs_vswitch_type.update(vswitch_type)
network_type = network['network_config'].get('network_type')
if network_type in ['vlan']:
self.dvs_network_type.extend(['vlan'])
self._private_network_conf_for_dvs(network)
elif network_type in ['vxlan']:
self.dvs_network_type.extend(['vxlan'])
self._bearing_network_conf_for_dvs(network)
def _set_dvs_config(self):
if not self.networks_config.get('enable_sdn') and (
self.dvs_vswitch_type.get('ovs_agent_patch')) and (
len(self.dvs_vswitch_type.get('ovs_agent_patch')) > 0):
return
if not self.dvs_vswitch_type.get('ovs_agent_patch') and not self.dvs_vswitch_type.get('ovdk'):
return
update_conf(self.tecs, 'CONFIG_DVS_TYPE', self.dvs_vswitch_type)
update_conf(self.tecs, 'CONFIG_DVS_PHYSICAL_NICS',
",".join(set(self.dvs_physnics)))
if 'vlan' in self.dvs_network_type:
update_conf(self.tecs, 'CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS',
self.dvs_bridge_mappings)
update_conf(self.tecs, 'CONFIG_NEUTRON_OVS_PHYSNET_IFACES',
self.dvs_physical_mappings)
elif 'vxlan' in self.dvs_network_type:
update_conf(self.tecs, 'CONFIG_DVS_VXLAN_INFO',
self.dvs_vxlan_info)
update_conf(self.tecs, 'CONFIG_DVS_NODE_DOMAIN_ID',
self.dvs_domain_id)
update_conf(self.tecs, 'CONFIG_NEUTRON_ML2_VTEP_IP_RANGES',
self.dvs_vtep_ip_ranges)
'''
private_networks_config_for_dvs
{
network_config = {
enable_sdn = ''
network_type = ['vlan']
}
vswitch_type = { ===============> such as vxlan
'ovdk': ['192.168.0.2', '192.168.0.20'] ,
'ovs_agent_patch': ['192.168.0.21', '192.168.0.30']
}
physnics_config = {
physical_mappings = eth0 ===============> such as ovs vlan
bridge_mappings = ==========> private->name & physical_name
}
}
'''
def _private_network_conf_for_dvs(self, private_network):
self.dvs_vswitch_type.update(private_network.get('vswitch_type'))
self.dvs_bridge_mappings = \
private_network['physnics_config'].get('bridge_mappings')
self.dvs_physical_mappings = \
private_network['physnics_config'].get('physical_mappings')
self.dvs_physical_mappings = self.dvs_physical_mappings.encode('utf8')
self.dvs_physnics.extend(
get_physnics_info('vlan', self.dvs_physical_mappings))
'''
bearing_networks_config
{
network_config = {
enable_sdn = ''
network_type = ['vxlan']
vtep_ip_ranges=[['192.168.0.2','192.168.0.200']]==>bearing->ip_range
}
vswitch_type = { ==========> bearing->assigned_network
'ovdk': ['192.168.0.2', '192.168.0.20'] ,
'ovs_agent_patch': ['192.168.0.21', '192.168.0.30']
}
physnics_config = {
vxlan_info = eth0 ======>bearing->assigned_network->host_interface
domain_id = { ==========> bearing->assigned_network
'0': ['192.168.0.2', '192.168.0.20'] ,
'1': ['192.168.0.21', '192.168.0.30']
}
}
}
'''
def _bearing_network_conf_for_dvs(self, bearing_network):
self.dvs_vtep_ip_ranges.extend(
bearing_network['network_config'].get('vtep_ip_ranges'))
self.dvs_vswitch_type.update(bearing_network.get('vswitch_type'))
self.dvs_domain_id.update(
bearing_network['physnics_config'].get('dvs_domain_id'))
self.dvs_vxlan_info = \
bearing_network['physnics_config'].get('vxlan_info')
self.dvs_physnics.extend(
get_physnics_info('vxlan', self.dvs_vxlan_info))
default_tecs_conf_template_path = "/var/lib/daisy/tecs/"
tecs_conf_template_path = default_tecs_conf_template_path
def private_network_conf(tecs, private_networks_config):
if private_networks_config:
mode_str = {
'0':'(active-backup;off;"%s-%s")',
'1':'(balance-slb;off;"%s-%s")',
'2':'(balance-tcp;active;"%s-%s")'
}
config_neutron_sriov_bridge_mappings = []
config_neutron_sriov_physnet_ifaces = []
config_neutron_ovs_bridge_mappings = []
config_neutron_ovs_physnet_ifaces = []
for private_network in private_networks_config:
type = private_network.get('type', None)
name = private_network.get('name', None)
assign_networks = private_network.get('assigned_networks', None)
slave1 = private_network.get('slave1', None)
slave2 = private_network.get('slave2', None)
mode = private_network.get('mode', None)
if not type or not name or not assign_networks or not slave1 or not slave2 or not mode:
break
for assign_network in assign_networks:
network_type = assign_network.get('network_type', None)
# TODO: why are ml2_type & physnet_name null?
ml2_type = assign_network.get('ml2_type', None)
physnet_name = assign_network.get('physnet_name', None)
if not network_type or not ml2_type or not physnet_name:
break
# ether
if 0 == cmp(type, 'ether') and 0 == cmp(network_type, 'PRIVATE'):
if 0 == cmp(ml2_type, 'sriov'):
config_neutron_sriov_bridge_mappings.append("%s:%s" % (physnet_name, "br-" + name))
config_neutron_sriov_physnet_ifaces.append("%s:%s" % (physnet_name, name))
elif 0 == cmp(ml2_type, 'ovs'):
config_neutron_ovs_bridge_mappings.append("%s:%s" % (physnet_name, "br-" + name))
config_neutron_ovs_physnet_ifaces.append("%s:%s" % (physnet_name, name))
# bond
elif 0 == cmp(type, 'bond') and 0 == cmp(network_type, 'PRIVATE'):
if 0 == cmp(ml2_type, 'sriov'):
config_neutron_sriov_bridge_mappings.append("%s:%s" % (physnet_name, "br-" + name))
config_neutron_sriov_physnet_ifaces.append(
"%s:%s" % (physnet_name, name + mode_str[mode] % (slave1, slave2)))
elif 0 == cmp(ml2_type, 'ovs'):
config_neutron_ovs_bridge_mappings.append("%s:%s" % (physnet_name, "br-" + name))
config_neutron_ovs_physnet_ifaces.append(
"%s:%s" % (physnet_name, name + mode_str[mode] % (slave1, slave2)))
if config_neutron_sriov_bridge_mappings:
update_conf(tecs,
'CONFIG_NEUTRON_SRIOV_BRIDGE_MAPPINGS',
",".join(config_neutron_sriov_bridge_mappings))
if config_neutron_sriov_physnet_ifaces:
update_conf(tecs,
'CONFIG_NEUTRON_SRIOV_PHYSNET_IFACES',
",".join(config_neutron_sriov_physnet_ifaces))
if config_neutron_ovs_bridge_mappings :
update_conf(tecs, 'CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS', ",".join(config_neutron_ovs_bridge_mappings))
if config_neutron_ovs_physnet_ifaces:
update_conf(tecs, 'CONFIG_NEUTRON_OVS_PHYSNET_IFACES', ",".join(config_neutron_ovs_physnet_ifaces))
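# Example output lines written into tecs.conf (hypothetical names):
#   CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS = phynet1:br-bond1
#   CONFIG_NEUTRON_OVS_PHYSNET_IFACES  = phynet1:bond1(active-backup;off;"eth1-eth2")
# i.e. mode '0' selects the '(active-backup;off;...)' bond template.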
def update_tecs_config(config_data, cluster_conf_path):
print "tecs config data is:"
import pprint
pprint.pprint(config_data)
msg="tecs config data is: %s" % config_data
LOG.info(msg)
daisy_tecs_path = tecs_conf_template_path
tecs_conf_template_file = os.path.join(daisy_tecs_path, "tecs.conf")
ha_conf_template_file = os.path.join(daisy_tecs_path, "HA.conf")
if not os.path.exists(cluster_conf_path):
os.makedirs(cluster_conf_path)
tecs_conf_out = os.path.join(cluster_conf_path, "tecs.conf")
ha_config_out = os.path.join(cluster_conf_path, "HA_1.conf")
tecs = ConfigParser()
tecs.optionxform = str
tecs.read(tecs_conf_template_file)
cluster_data = config_data['OTHER']['cluster_data']
update_conf(tecs, 'CLUSTER_ID', cluster_data['id'])
if cluster_data.has_key('networking_parameters'):
networking_parameters = cluster_data['networking_parameters']
if networking_parameters.has_key('base_mac') and networking_parameters['base_mac']:
update_conf(tecs, 'CONFIG_NEUTRON_BASE_MAC', networking_parameters['base_mac'])
if networking_parameters.has_key('gre_id_range') and len(networking_parameters['gre_id_range'])>1 \
and networking_parameters['gre_id_range'][0] and networking_parameters['gre_id_range'][1]:
update_conf(tecs, 'CONFIG_NEUTRON_ML2_TUNNEL_ID_RANGES', ("%s:%s" % (networking_parameters['gre_id_range'][0],networking_parameters['gre_id_range'][1])))
if networking_parameters.get("vni_range",['1000','3000']) and len(networking_parameters['vni_range'])>1 \
and networking_parameters['vni_range'][0] and networking_parameters['vni_range'][1]:
update_conf(tecs, 'CONFIG_NEUTRON_ML2_VNI_RANGES', ("%s:%s" % (networking_parameters['vni_range'][0],networking_parameters['vni_range'][1])))
if networking_parameters.get("segmentation_type","vlan"):
segmentation_type = networking_parameters.get("segmentation_type","vlan")
update_conf(tecs, 'CONFIG_NEUTRON_ML2_TENANT_NETWORK_TYPES', segmentation_type)
update_conf(tecs, 'CONFIG_NEUTRON_ML2_TYPE_DRIVERS', segmentation_type)
physic_network_cfg = config_data['OTHER']['physic_network_config']
if physic_network_cfg.get('json_path', None):
update_conf(tecs, 'CONFIG_NEUTRON_ML2_JSON_PATH', physic_network_cfg['json_path'])
if physic_network_cfg.get('vlan_ranges', None):
update_conf(tecs, 'CONFIG_NEUTRON_ML2_VLAN_RANGES',physic_network_cfg['vlan_ranges'])
if config_data['OTHER']['tecs_installed_hosts']:
update_conf(tecs, 'EXCLUDE_SERVERS', ",".join(config_data['OTHER']['tecs_installed_hosts']))
ha = ConfigParser()
ha.optionxform = str
ha.read(ha_conf_template_file)
config = AnalsyConfig(config_data)
if config_data['OTHER'].has_key('ha_nic_name'):
ha_nic_name = config_data['OTHER']['ha_nic_name']
else:
ha_nic_name = ""
config.prepare()
config.update_tecs_conf(tecs)
config.update_ha_conf(ha, ha_nic_name, tecs)
update_conf_with_zenic(tecs, config_data['OTHER']['zenic_config'])
if config_data['OTHER']['dvs_config'].has_key('network_config'):
config_data['OTHER']['dvs_config']['network_config']['enable_sdn'] = \
config_data['OTHER']['zenic_config'].get('vip', False)
dvs_config = DvsDaisyConfig(tecs, config_data['OTHER']['dvs_config'])
dvs_config.config_tecs_for_dvs()
tecs.write(open(tecs_conf_out, "w+"))
ha.write(open(ha_config_out, "w+"))
return
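# Minimal invocation sketch (ids and paths are hypothetical, and intentionally
# minimal -- AnalsyConfig and update_conf_with_zenic may require more keys in
# a real deployment):
#   config_data = {'OTHER': {'cluster_data': {'id': 'cluster-uuid-1'},
#                            'physic_network_config': {},
#                            'tecs_installed_hosts': [],
#                            'zenic_config': {},
#                            'dvs_config': {}}}
#   update_tecs_config(config_data, '/var/lib/daisy/tecs/cluster-uuid-1')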
def test():
print("Hello, world!")

View File

@ -0,0 +1,230 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/install endpoint for tecs API
"""
import os
import copy
import subprocess
import time
import traceback
import webob.exc
from oslo_config import cfg
from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPForbidden
from threading import Thread
from daisy import i18n
from daisy import notifier
from daisy.common import exception
import daisy.registry.client.v1.api as registry
import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.tecs.common as tecs_cmn
try:
import simplejson as json
except ImportError:
import json
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
tecs_state = tecs_cmn.TECS_STATE
def _get_service_disk_for_disk_array(req, role_id):
disk_info = []
service_disks = tecs_cmn.get_service_disk_list(req, {'filters': {'role_id': role_id}})
for service_disk in service_disks:
share_disk = {}
if service_disk['disk_location'] == 'share':
share_disk['service'] = service_disk['service']
share_disk['lun'] = service_disk['lun']
share_disk['data_ips'] = service_disk['data_ips'].split(',')
share_disk['lvm_config'] = {}
share_disk['lvm_config']['size'] = service_disk['size']
share_disk['lvm_config']['vg_name'] = 'vg_%s' % service_disk['service']
share_disk['lvm_config']['lv_name'] = 'lv_%s' % service_disk['service']
share_disk['lvm_config']['fs_type'] = 'ext4'
disk_info.append(share_disk)
return disk_info
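# Shape of one share_disk entry appended above (values hypothetical):
#   {'service': 'db', 'lun': 1, 'data_ips': ['192.168.1.10'],
#    'lvm_config': {'size': 100, 'vg_name': 'vg_db',
#                   'lv_name': 'lv_db', 'fs_type': 'ext4'}}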
def _get_cinder_volume_for_disk_array(req, role_id):
cinder_volume_info = []
cinder_volumes = tecs_cmn.get_cinder_volume_list(req, {'filters': {'role_id': role_id}})
for cinder_volume in cinder_volumes:
cv_info = {}
cv_info['management_ips'] = cinder_volume['management_ips'].split(',')
cv_info['data_ips'] = cinder_volume['data_ips'].split(',')
cv_info['user_name'] = cinder_volume['user_name']
cv_info['user_pwd'] = cinder_volume['user_pwd']
index = cinder_volume['backend_index']
cv_info['backend'] = {index:{}}
cv_info['backend'][index]['volume_driver'] = cinder_volume['volume_driver']
cv_info['backend'][index]['volume_type'] = cinder_volume['volume_type']
cv_info['backend'][index]['pools'] = cinder_volume['pools'].split(',')
cinder_volume_info.append(cv_info)
return cinder_volume_info
def get_disk_array_info(req, cluster_id):
share_disk_info = []
volume_disk_info = {}
cinder_volume_disk_list = []
roles = daisy_cmn.get_cluster_roles_detail(req,cluster_id)
for role in roles:
if role['deployment_backend'] != daisy_cmn.tecs_backend_name:
continue
if role['name'] == 'CONTROLLER_HA':
share_disks = _get_service_disk_for_disk_array(req, role['id'])
share_disk_info += share_disks
cinder_volumes = _get_cinder_volume_for_disk_array(req, role['id'])
cinder_volume_disk_list += cinder_volumes
if cinder_volume_disk_list:
volume_disk_info['disk_array'] = cinder_volume_disk_list
return (share_disk_info, volume_disk_info)
def get_host_min_mac(host_interfaces):
macs = [interface['mac'] for interface in host_interfaces
if interface['type'] == 'ether' and interface['mac']]
macs.sort()
return macs[0]
def get_ha_and_compute_ips(req, cluster_id):
controller_ha_nodes = {}
computer_ips = []
roles = daisy_cmn.get_cluster_roles_detail(req,cluster_id)
cluster_networks = daisy_cmn.get_cluster_networks_detail(req, cluster_id)
for role in roles:
if role['deployment_backend'] != daisy_cmn.tecs_backend_name:
continue
role_hosts = daisy_cmn.get_hosts_of_role(req, role['id'])
for role_host in role_hosts:
            # hosts that have already installed TECS are excluded
if (role_host['status'] == tecs_state['ACTIVE'] or
role_host['status'] == tecs_state['UPDATING'] or
role_host['status'] == tecs_state['UPDATE_FAILED']):
continue
host_detail = daisy_cmn.get_host_detail(req,
role_host['host_id'])
host_ip = tecs_cmn.get_host_network_ip(req,
host_detail,
cluster_networks,
'MANAGEMENT')
if role['name'] == "CONTROLLER_HA":
pxe_mac = [interface['mac'] for interface in host_detail['interfaces']
if interface['is_deployment'] == True]
if pxe_mac and pxe_mac[0]:
controller_ha_nodes[host_ip] = pxe_mac[0]
else:
min_mac = get_host_min_mac(host_detail['interfaces'])
controller_ha_nodes[host_ip] = min_mac
if role['name'] == "COMPUTER":
computer_ips.append(host_ip)
return (controller_ha_nodes, computer_ips)
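# Note: for CONTROLLER_HA nodes the PXE (deployment) MAC is preferred as the
# node identity for storage configuration, falling back to the smallest ether
# MAC. The three config_* helpers below then share one pattern: establish ssh
# trust via trustme.sh, copy storage_auto_config/ to the host, and run
# storage_auto_config.py remotely through clush; an empty returned error_msg
# means every host succeeded.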
def config_ha_share_disk(share_disk_info, controller_ha_nodes):
error_msg = ""
cmd = 'rm -rf /var/lib/daisy/tecs/storage_auto_config/base/*.json'
daisy_cmn.subprocess_call(cmd)
with open("/var/lib/daisy/tecs/storage_auto_config/base/control.json", "w") as fp:
json.dump(share_disk_info, fp, indent=2)
for host_ip in controller_ha_nodes.keys():
password = "ossdbg1"
cmd = '/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, password)
daisy_cmn.subprocess_call(cmd)
cmd = 'clush -S -w %s "mkdir -p /home/tecs_install"' % (host_ip,)
daisy_cmn.subprocess_call(cmd)
try:
scp_bin_result = subprocess.check_output(
'scp -o StrictHostKeyChecking=no -r /var/lib/daisy/tecs/storage_auto_config %s:/home/tecs_install' % (host_ip,),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
error_msg = "scp /var/lib/daisy/tecs/storage_auto_config to %s failed!" % host_ip
return error_msg
try:
LOG.info(_("Config share disk for host %s" % host_ip))
cmd = "cd /home/tecs_install/storage_auto_config/; python storage_auto_config.py share_disk %s" % controller_ha_nodes[host_ip]
exc_result = subprocess.check_output('clush -S -w %s "%s"' % (host_ip,cmd),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
LOG.info(_("Storage script error message: %s" % e.output))
error_msg = "config Disk Array share disks on %s failed!" % host_ip
return error_msg
return error_msg
def config_ha_cinder_volume(volume_disk_info, controller_ha_ips):
error_msg = ""
cmd = 'rm -rf /var/lib/daisy/tecs/storage_auto_config/base/*.json'
daisy_cmn.subprocess_call(cmd)
with open("/var/lib/daisy/tecs/storage_auto_config/base/cinder.json", "w") as fp:
json.dump(volume_disk_info, fp, indent=2)
for host_ip in controller_ha_ips:
password = "ossdbg1"
cmd = '/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, password)
daisy_cmn.subprocess_call(cmd)
cmd = 'clush -S -w %s "mkdir -p /home/tecs_install"' % (host_ip,)
daisy_cmn.subprocess_call(cmd)
try:
scp_bin_result = subprocess.check_output(
'scp -o StrictHostKeyChecking=no -r /var/lib/daisy/tecs/storage_auto_config %s:/home/tecs_install' % (host_ip,),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
error_msg = "scp /var/lib/daisy/tecs/storage_auto_config to %s failed!" % host_ip
return error_msg
try:
LOG.info(_("Config cinder volume for host %s" % host_ip))
cmd = 'cd /home/tecs_install/storage_auto_config/; python storage_auto_config.py cinder_conf %s' % host_ip
exc_result = subprocess.check_output('clush -S -w %s "%s"' % (host_ip,cmd),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
LOG.info(_("Storage script error message: %s" % e.output))
error_msg = "config Disk Array cinder volumes on %s failed!" % host_ip
return error_msg
return error_msg
def config_compute_multipath(all_nodes_ip):
error_msg = ""
for host_ip in all_nodes_ip:
password = "ossdbg1"
cmd = '/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, password)
daisy_cmn.subprocess_call(cmd)
cmd = 'clush -S -w %s "mkdir -p /home/tecs_install"' % (host_ip,)
daisy_cmn.subprocess_call(cmd)
try:
scp_bin_result = subprocess.check_output(
'scp -o StrictHostKeyChecking=no -r /var/lib/daisy/tecs/storage_auto_config %s:/home/tecs_install' % (host_ip,),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
error_msg = "scp /var/lib/daisy/tecs/storage_auto_config to %s failed!" % host_ip
return error_msg
try:
LOG.info(_("Config multipath for host %s" % host_ip))
cmd = 'cd /home/tecs_install/storage_auto_config/; python storage_auto_config.py check_multipath'
exc_result = subprocess.check_output('clush -S -w %s "%s"' % (host_ip,cmd),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
LOG.info(_("Storage script error message: %s" % e.output))
error_msg = "config Disk Array multipath on %s failed!" % host_ip
return error_msg
return error_msg

File diff suppressed because it is too large

View File

@ -0,0 +1,155 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/hosts endpoint for Daisy v1 API
"""
import webob.exc
import subprocess
from oslo_config import cfg
from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPForbidden
from threading import Thread, Lock
import threading
from daisy import i18n
from daisy import notifier
from daisy.api import policy
import daisy.api.v1
from daisy.common import exception
from daisy.common import property_utils
from daisy.common import utils
from daisy.common import wsgi
from daisy.api.v1 import controller
from daisy.api.v1 import filters
import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.tecs.common as tecs_cmn
import daisy.registry.client.v1.api as registry
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
tecs_state = tecs_cmn.TECS_STATE
def update_progress_to_db(req, role_id_list, status, hosts_list,host_ip=None):
"""
    Write uninstall progress and status to db; we use the global lock object
    'uninstall_mutex' to make sure this function is thread safe.
    :param req: http req.
    :param role_id_list: Columns that need to be updated in the role table.
:param status: Uninstall status.
:return:
"""
for role_id in role_id_list:
role_hosts = daisy_cmn.get_hosts_of_role(req, role_id)
for host_id_ip in hosts_list:
host_ip_tmp=host_id_ip.values()[0]
host_id_tmp=host_id_ip.keys()[0]
if host_ip:
for role_host in role_hosts:
if (host_ip_tmp == host_ip and
role_host['host_id']== host_id_tmp):
role_host_meta = {}
if 0 == cmp(status, tecs_state['UNINSTALLING']):
role_host_meta['progress'] = 10
role_host_meta['messages'] = 'TECS uninstalling'
if 0 == cmp(status, tecs_state['UNINSTALL_FAILED']):
                            role_host_meta['messages'] = 'TECS uninstall failed'
elif 0 == cmp(status, tecs_state['ACTIVE']):
role_host_meta['progress'] = 100
role_host_meta['messages'] = 'TECS uninstalled successfully'
if role_host_meta:
role_host_meta['status'] = status
daisy_cmn.update_role_host(req,
role_host['id'],
role_host_meta)
else:
role = {}
if 0 == cmp(status, tecs_state['UNINSTALLING']):
for role_host in role_hosts:
role_host_meta = {}
role_host_meta['status'] = status
role_host_meta['progress'] = 0
daisy_cmn.update_role_host(req,
role_host['id'],
role_host_meta)
role['progress']=0
role['messages'] = 'TECS uninstalling'
if 0 == cmp(status, tecs_state['UNINSTALL_FAILED']):
                    role['messages'] = 'TECS uninstall failed'
elif 0 == cmp(status, tecs_state['INIT']):
role['progress'] = 100
role['messages'] = 'TECS uninstalled successfully'
if role:
role['status'] = status
daisy_cmn.update_role(req, role_id, role)
if 0 == cmp(status, tecs_state['INIT']):
daisy_cmn.delete_role_hosts(req, role_id)
def _thread_bin(req, host_ip, role_id_list,hosts_list):
# uninstall network-configuration-1.1.1-15.x86_64.rpm
update_progress_to_db(req,role_id_list,tecs_state['UNINSTALLING'],hosts_list,host_ip)
tecs_cmn.TecsShellExector(host_ip, 'uninstall_rpm')
cmd = 'mkdir -p /var/log/daisy/daisy_uninstall/'
daisy_cmn.subprocess_call(cmd)
password = "ossdbg1"
var_log_path = "/var/log/daisy/daisy_uninstall/%s_uninstall_tecs.log" % host_ip
with open(var_log_path, "w+") as fp:
cmd = '/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, password)
daisy_cmn.subprocess_call(cmd,fp)
cmd = 'clush -S -b -w %s "rm -rf /home/daisy_uninstall"' % (host_ip,)
daisy_cmn.subprocess_call(cmd,fp)
cmd = 'clush -S -w %s "mkdir -p /home/daisy_uninstall"' % (host_ip,)
daisy_cmn.subprocess_call(cmd,fp)
try:
scp_bin_result = subprocess.check_output(
'clush -S -w %s -c /var/lib/daisy/tecs/ZXTECS*.bin --dest=/home/daisy_uninstall' % (host_ip,),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
update_progress_to_db(req, role_id_list, tecs_state['UNINSTALL_FAILED'],hosts_list,host_ip)
LOG.error(_("scp TECS bin for %s failed!" % host_ip))
fp.write(e.output.strip())
cmd = 'clush -S -w %s "chmod 777 /home/daisy_uninstall/*"' % (host_ip,)
daisy_cmn.subprocess_call(cmd,fp)
try:
exc_result = subprocess.check_output(
'clush -S -w %s /home/daisy_uninstall/ZXTECS*.bin clean' % (host_ip,),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
update_progress_to_db(req, role_id_list, tecs_state['UNINSTALL_FAILED'],hosts_list,host_ip)
LOG.error(_("Uninstall TECS for %s failed!" % host_ip))
fp.write(e.output.strip())
else:
update_progress_to_db(req, role_id_list, tecs_state['ACTIVE'], hosts_list,host_ip)
LOG.info(_("Uninstall TECS for %s successfully!" % host_ip))
fp.write(exc_result)
# this will raise all the exceptions of the thread to the log file
def thread_bin(req, host_ip, role_id_list, hosts_list):
try:
_thread_bin(req, host_ip, role_id_list, hosts_list)
except Exception as e:
LOG.exception(e.message)

View File

@ -0,0 +1,151 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/update endpoint for Daisy v1 API
"""
import webob.exc
import subprocess
from oslo_config import cfg
from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPForbidden
from threading import Thread, Lock
import threading
import time
from daisy import i18n
from daisy import notifier
from daisy.api import policy
import daisy.api.v1
import daisy.registry.client.v1.api as registry
from daisy.common import exception
from daisy.common import property_utils
from daisy.common import utils
from daisy.common import wsgi
from daisy.api.v1 import controller
from daisy.api.v1 import filters
from daisy.api.backends import os as os_handle
import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.tecs.common as tecs_cmn
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
tecs_state = tecs_cmn.TECS_STATE
def update_progress_to_db(req,role_id_list,status,hosts_list,host_ip=None):
"""
    Write update progress and status to db,
    and make sure this function is thread safe.
    :param req: http req.
    :param role_id_list: Columns that need to be updated in the role table.
:param status: Update status.
:return:
"""
for role_id in role_id_list:
role_hosts = daisy_cmn.get_hosts_of_role(req, role_id)
for host_id_ip in hosts_list:
host_ip_tmp=host_id_ip.values()[0]
host_id_tmp=host_id_ip.keys()[0]
if host_ip:
for role_host in role_hosts:
if (host_ip_tmp == host_ip and
role_host['host_id']== host_id_tmp):
role_host_meta = {}
if 0 == cmp(status, tecs_state['UPDATING']):
role_host_meta['progress'] = 10
role_host_meta['messages'] = 'TECS upgrading'
if 0 == cmp(status, tecs_state['UPDATE_FAILED']):
                            role_host_meta['messages'] = 'TECS upgrade failed'
elif 0 == cmp(status, tecs_state['ACTIVE']):
role_host_meta['progress'] = 100
role_host_meta['messages'] = 'TECS upgraded successfully'
if role_host_meta:
role_host_meta['status'] = status
daisy_cmn.update_role_host(req,
role_host['id'],
role_host_meta)
else:
role = {}
if 0 == cmp(status, tecs_state['UPDATING']):
for role_host in role_hosts:
role_host_meta = {}
role_host_meta['status'] = status
role_host_meta['progress'] = 0
role_host_meta['messages'] = 'TECS upgrading'
daisy_cmn.update_role_host(req,
role_host['id'],
role_host_meta)
role['progress']=0
role['messages'] = 'TECS upgrading'
if 0 == cmp(status, tecs_state['UPDATE_FAILED']):
                    role['messages'] = 'TECS upgrade failed'
elif 0 == cmp(status, tecs_state['ACTIVE']):
role['progress'] = 100
role['messages'] = 'TECS upgraded successfully'
if role:
role['status'] = status
daisy_cmn.update_role(req, role_id, role)
def thread_bin(req,role_id_list, host_ip,hosts_list):
# update network-configuration-1.1.1-15.x86_64.rpm
update_progress_to_db(req,role_id_list,tecs_state['UPDATING'],hosts_list,host_ip)
cmd = 'mkdir -p /var/log/daisy/daisy_update/'
daisy_cmn.subprocess_call(cmd)
password = "ossdbg1"
var_log_path = "/var/log/daisy/daisy_update/%s_update_tecs.log" % host_ip
with open(var_log_path, "w+") as fp:
cmd = '/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, password)
daisy_cmn.subprocess_call(cmd,fp)
cmd = 'clush -S -w %s "mkdir -p /home/daisy_update"' % (host_ip,)
daisy_cmn.subprocess_call(cmd,fp)
cmd = 'clush -S -b -w %s "rm -rf /home/daisy_update/ZXTECS*.bin"' % (host_ip,)
daisy_cmn.subprocess_call(cmd,fp)
tecs_cmn.TecsShellExector(host_ip, 'update_rpm')
try:
scp_bin_result = subprocess.check_output(
'clush -S -w %s -c /var/lib/daisy/tecs/ZXTECS*.bin --dest=/home/daisy_update' % (host_ip,),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
update_progress_to_db(req,role_id_list,tecs_state['UPDATE_FAILED'],hosts_list,host_ip)
LOG.error(_("scp TECS bin for %s failed!" % host_ip))
fp.write(e.output.strip())
return 1
cmd = 'clush -S -w %s "chmod 777 /home/daisy_update/*"' % (host_ip,)
daisy_cmn.subprocess_call(cmd,fp)
try:
exc_result = subprocess.check_output(
'clush -S -w %s "/home/daisy_update/ZXTECS*.bin upgrade"' % (host_ip,),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
update_progress_to_db(req,role_id_list,tecs_state['UPDATE_FAILED'],hosts_list,host_ip)
LOG.error(_("Update TECS for %s failed!" % host_ip))
fp.write(e.output.strip())
return 2
else:
update_progress_to_db(req,role_id_list,tecs_state['ACTIVE'],hosts_list,host_ip)
fp.write(exc_result)
return 0

View File

@ -0,0 +1,194 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/install endpoint for zenic API
"""
import os
import copy
import subprocess
import time
import commands
import traceback
import webob.exc
from oslo_config import cfg
from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPForbidden
from webob.exc import HTTPServerError
import threading
from threading import Thread
from daisy import i18n
from daisy import notifier
from daisy.api import policy
import daisy.api.v1
from daisy.common import exception
import daisy.registry.client.v1.api as registry
from daisy.api.backends.zenic import config
from daisy.api.backends import driver
from daisy.api.network_api import network as neutron
from ironicclient import client as ironic_client
import daisy.api.backends.os as os_handle
import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.zenic.common as zenic_cmn
import daisy.api.backends.zenic.install as instl
import daisy.api.backends.zenic.uninstall as unstl
import daisy.api.backends.zenic.upgrade as upgrd
try:
import simplejson as json
except ImportError:
import json
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
zenic_state = zenic_cmn.ZENIC_STATE
class API(driver.DeploymentDriver):
def __init__(self):
super(API, self).__init__()
return
def install(self, req, cluster_id):
"""
Install zenic to a cluster.
        :param req: The WSGI/Webob Request object
        :param cluster_id: cluster id
"""
#instl.pxe_server_build(req, install_meta)
# get hosts config which need to install OS
#hosts_need_os = instl.get_cluster_hosts_config(req, cluster_id)
        # if there are hosts that need OS installation, ZENIC installation is executed in OSInstallTask
#if hosts_need_os:
#os_install_obj = instl.OSInstallTask(req, cluster_id, hosts_need_os)
#os_install_thread = Thread(target=os_install_obj.run)
#os_install_thread.start()
#else:
LOG.info(_("No host need to install os, begin install ZENIC for cluster %s." % cluster_id))
zenic_install_task = instl.ZENICInstallTask(req, cluster_id)
zenic_install_task.start()
LOG.info((_("begin install zenic, please waiting....")))
time.sleep(5)
LOG.info((_("install zenic successfully")))
def uninstall(self, req, cluster_id):
"""
        Uninstall ZENIC from a cluster.
:param req: The WSGI/Webob Request object
:raises HTTPBadRequest if x-install-cluster is missing
"""
(role_id_list, hosts_list) = zenic_cmn.get_roles_and_hosts_list(req, cluster_id)
if role_id_list:
if not hosts_list:
msg = _("there is no host in cluster %s") % cluster_id
raise exception.ThreadBinException(msg)
unstl.update_progress_to_db(req, role_id_list, zenic_state['UNINSTALLING'], 0.0)
uninstall_progress_percentage = round(1*1.0/len(hosts_list), 2)*100
threads = []
for host in hosts_list:
t = threading.Thread(target=unstl.thread_bin,args=(req,host,role_id_list,uninstall_progress_percentage))
t.setDaemon(True)
t.start()
threads.append(t)
LOG.info(_("uninstall threads have started, please waiting...."))
try:
for t in threads:
t.join()
except:
LOG.warn(_("Join uninstall thread %s failed!" % t))
else:
uninstall_failed_flag = False
for role_id in role_id_list:
role = daisy_cmn.get_role_detail(req, role_id)
if role['progress'] == 100:
unstl.update_progress_to_db(req, role_id_list, zenic_state['UNINSTALL_FAILED'])
uninstall_failed_flag = True
break
if role['status'] == zenic_state['UNINSTALL_FAILED']:
uninstall_failed_flag = True
break
if not uninstall_failed_flag:
LOG.info(_("all uninstall threads have done, set all roles status to 'init'!"))
unstl.update_progress_to_db(req, role_id_list, zenic_state['INIT'])
LOG.info((_("begin uninstall zenic, please waiting....")))
time.sleep(5)
LOG.info((_("uninstall zenic successfully")))
def upgrade(self, req, cluster_id):
"""
        Update zenic on a cluster.
:param req: The WSGI/Webob Request object
:raises HTTPBadRequest if x-install-cluster is missing
"""
(role_id_list, hosts_list) = zenic_cmn.get_roles_and_hosts_list(req, cluster_id)
if not hosts_list:
msg = _("there is no host in cluster %s") % cluster_id
raise exception.ThreadBinException(msg)
upgrd.update_progress_to_db(req, role_id_list, zenic_state['UPDATING'], 0.0)
update_progress_percentage = round(1*1.0/len(hosts_list), 2)*100
threads = []
for host in hosts_list:
t = threading.Thread(target=upgrd.thread_bin,args=(req,host,role_id_list,update_progress_percentage))
t.setDaemon(True)
t.start()
threads.append(t)
LOG.info(_("upgrade threads have started, please waiting...."))
try:
for t in threads:
t.join()
except:
LOG.warn(_("Join upgrade thread %s failed!" % t))
else:
update_failed_flag = False
for role_id in role_id_list:
role = daisy_cmn.get_role_detail(req, role_id)
if role['progress'] == 0:
upgrd.update_progress_to_db(req, role_id_list, zenic_state['UPDATE_FAILED'])
update_failed_flag = True
break
if role['status'] == zenic_state['UPDATE_FAILED']:
update_failed_flag = True
break
if not update_failed_flag:
LOG.info(_("all update threads have done, set all roles status to 'active'!"))
upgrd.update_progress_to_db(req, role_id_list, zenic_state['ACTIVE'])

View File

@ -0,0 +1,300 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/install endpoint for zenic API
"""
import os
import copy
import subprocess
import time
import traceback
import webob.exc
from oslo_config import cfg
from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPForbidden
from threading import Thread
from daisy import i18n
from daisy import notifier
from daisy.api import policy
import daisy.api.v1
from daisy.common import exception
import daisy.registry.client.v1.api as registry
import daisy.api.backends.common as daisy_cmn
try:
import simplejson as json
except ImportError:
import json
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
daisy_zenic_path = '/var/lib/daisy/zenic/'
ZENIC_STATE = {
'INIT' : 'init',
'INSTALLING' : 'installing',
'ACTIVE' : 'active',
'INSTALL_FAILED': 'install-failed',
'UNINSTALLING': 'uninstalling',
'UNINSTALL_FAILED': 'uninstall-failed',
'UPDATING': 'updating',
'UPDATE_FAILED': 'update-failed',
}
def get_cluster_hosts(req, cluster_id):
try:
cluster_hosts = registry.get_cluster_hosts(req.context, cluster_id)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return cluster_hosts
def get_host_detail(req, host_id):
try:
host_detail = registry.get_host_metadata(req.context, host_id)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return host_detail
def get_roles_detail(req):
try:
roles = registry.get_roles_detail(req.context)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return roles
def get_hosts_of_role(req, role_id):
try:
hosts = registry.get_role_host_metadata(req.context, role_id)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return hosts
def get_role_detail(req, role_id):
try:
role = registry.get_role_metadata(req.context, role_id)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return role
def update_role(req, role_id,role_meta):
try:
registry.update_role_metadata(req.context, role_id, role_meta)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
def update_role_host(req, role_id, role_host):
try:
registry.update_role_host_metadata(req.context, role_id, role_host)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
def delete_role_hosts(req, role_id):
try:
registry.delete_role_host_metadata(req.context, role_id)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
def _get_cluster_network(cluster_networks, network_type):
network = [cn for cn in cluster_networks
if cn['name'] in network_type]
if not network or not network[0]:
msg = "network %s is not exist" % (network_type)
raise exception.InvalidNetworkConfig(msg)
else:
return network[0]
def get_host_interface_by_network(host_detail, network_type):
host_detail_info = copy.deepcopy(host_detail)
interface_list = [hi for hi in host_detail_info['interfaces']
for assigned_network in hi['assigned_networks']
if assigned_network and network_type == assigned_network['name']]
interface = {}
if interface_list:
interface = interface_list[0]
if not interface:
msg = "network %s of host %s is not exist" % (network_type, host_detail_info['id'])
raise exception.InvalidNetworkConfig(msg)
return interface
def get_host_network_ip(req, host_detail, cluster_networks, network_type):
interface_network_ip = ''
host_interface = get_host_interface_by_network(host_detail, network_type)
if host_interface:
network = _get_cluster_network(cluster_networks, network_type)
assigned_network = daisy_cmn.get_assigned_network(req,
host_interface['id'],
network['id'])
interface_network_ip = assigned_network['ip']
if not interface_network_ip:
msg = "%s network ip of host %s can't be empty" % (network_type, host_detail['id'])
raise exception.InvalidNetworkConfig(msg)
return interface_network_ip
def get_deploy_node_cfg(req, host_detail, cluster_networks):
host_deploy_network = get_host_interface_by_network(host_detail, 'DEPLOYMENT')
host_deploy_ip = get_host_network_ip(req, host_detail, cluster_networks, 'DEPLOYMENT')
if not host_deploy_ip:
msg = "deployment ip of host %s can't be empty" % host_detail['id']
raise exception.InvalidNetworkConfig(msg)
host_deploy_macname = host_deploy_network['name']
if not host_deploy_macname:
msg = "deployment macname of host %s can't be empty" % host_detail['id']
raise exception.InvalidNetworkConfig(msg)
host_mgt_ip = get_host_network_ip(req, host_detail, cluster_networks, 'MANAGEMENT')
if not host_mgt_ip:
msg = "management ip of host %s can't be empty" % host_detail['id']
raise exception.InvalidNetworkConfig(msg)
memmode = 'tiny'
host_memory = 0
if host_detail.has_key('memory'):
host_memory = (int(host_detail['memory']['total'].strip().split()[0]))/(1024*1024)
if host_memory < 8:
memmode = 'tiny'
elif host_memory < 16:
memmode = 'small'
elif host_memory < 32:
memmode = 'medium'
else:
memmode = 'large'
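    # Example: a host reporting ~12 GiB total memory maps to 'small',
    # ~24 GiB to 'medium' and anything >= 32 GiB to 'large' (host_memory
    # above is in GiB, converted from the kB figure in host_detail).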
deploy_node_cfg = {}
deploy_node_cfg.update({'hostid':host_detail['id']})
deploy_node_cfg.update({'hostname':host_detail['name']})
deploy_node_cfg.update({'nodeip':host_deploy_ip})
deploy_node_cfg.update({'MacName':host_deploy_macname})
deploy_node_cfg.update({'memmode':memmode})
deploy_node_cfg.update({'mgtip':host_mgt_ip})
return deploy_node_cfg
def get_roles_and_hosts_list(req, cluster_id):
roles_id_list = set()
hosts_id_list = set()
hosts_list = []
cluster_networks = daisy_cmn.get_cluster_networks_detail(req, cluster_id)
roles = daisy_cmn.get_cluster_roles_detail(req,cluster_id)
for role in roles:
if role['deployment_backend'] != daisy_cmn.zenic_backend_name:
continue
role_hosts = daisy_cmn.get_hosts_of_role(req, role['id'])
if role_hosts:
for role_host in role_hosts:
if role_host['host_id'] not in hosts_id_list:
host = daisy_cmn.get_host_detail(req, role_host['host_id'])
host_ip = get_host_network_ip(req, host, cluster_networks, 'MANAGEMENT')
hosts_id_list.add(host['id'])
host_cfg = {}
host_cfg['mgtip'] = host_ip
host_cfg['rootpwd'] = host['root_pwd']
hosts_list.append(host_cfg)
roles_id_list.add(role['id'])
return (roles_id_list, hosts_list)
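# Each hosts_list entry built above looks like (values hypothetical):
#   {'mgtip': '192.168.1.10', 'rootpwd': 'xxxxxx'}
# which is the shape the install/uninstall/upgrade thread_bin functions expect.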
def check_and_get_zenic_version(daisy_zenic_pkg_path):
zenic_version_pkg_file = ""
zenic_version_pkg_name = ""
get_zenic_version_pkg = "ls %s| grep ^ZENIC.*\.zip$" % daisy_zenic_pkg_path
obj = subprocess.Popen(get_zenic_version_pkg,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdoutput, erroutput) = obj.communicate()
if stdoutput:
zenic_version_pkg_name = stdoutput.split('\n')[0]
zenic_version_pkg_file = daisy_zenic_pkg_path + zenic_version_pkg_name
chmod_for_zenic_version = 'chmod +x %s' % zenic_version_pkg_file
daisy_cmn.subprocess_call(chmod_for_zenic_version)
return (zenic_version_pkg_file,zenic_version_pkg_name)
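# Example (hypothetical package name): with ZENIC_V1.0.zip present under
# /var/lib/daisy/zenic/, this returns
# ('/var/lib/daisy/zenic/ZENIC_V1.0.zip', 'ZENIC_V1.0.zip'); with no matching
# zip, both returned strings stay empty.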
class ZenicShellExector():
"""
Class config task before install zenic bin.
"""
def __init__(self, mgt_ip, task_type, params={}):
self.task_type = task_type
self.mgt_ip = mgt_ip
self.params = params
self.clush_cmd = ""
self.PKG_NAME = self.params['pkg_name']
self.PKG_PATH = daisy_zenic_path + self.PKG_NAME
        self.CFG_PATH = daisy_zenic_path + mgt_ip + "_zenic.conf"
self.oper_type = {
'install' : self._install_pkg
}
self.oper_shell = {
'CMD_SSHPASS_PRE' : "sshpass -p ossdbg1 %(ssh_ip)s %(cmd)s",
'CMD_CFG_SCP' : "scp %(path)s root@%(ssh_ip)s:/etc/zenic/config" %
{'path': self.CFG_PATH, 'ssh_ip':mgt_ip},
'CMD_PKG_UNZIP' : "unzip /home/workspace/%(pkg_name)s -d /home/workspace/PKG" % {'pkg_name':self.PKG_NAME},
'CMD_PKG_SCP' : "scp %(path)s root@%(ssh_ip)s:/home/workspace/" %
{'path': self.PKG_PATH, 'ssh_ip':mgt_ip}
}
self._execute()
def _install_pkg(self):
if not os.path.exists(self.CFG_PATH):
LOG.error(_("<<<CFG %s not exist>>>" % self.CFG_PATH))
return
if not os.path.exists(self.PKG_PATH):
LOG.error(_("<<<PKG %s not exist>>>" % self.PKG_PATH))
return
self.clush_cmd = "%s;%s;%s" % \
(self.oper_shell['CMD_SSHPASS_PRE'] %
{"ssh_ip":"", "cmd":self.oper_shell['CMD_PKG_SCP']}, \
self.oper_shell['CMD_SSHPASS_PRE'] %
{"ssh_ip":"", "cmd":self.oper_shell['CMD_CFG_SCP']}, \
self.oper_shell['CMD_SSHPASS_PRE'] %
{"ssh_ip":"ssh " + self.mgt_ip, "cmd":self.oper_shell['CMD_PKG_UNZIP']})
subprocess.check_output(self.clush_cmd, shell = True, stderr=subprocess.STDOUT)
def _execute(self):
try:
if not self.task_type or not self.mgt_ip :
LOG.error(_("<<<ZenicShellExector::execute, input params invalid!>>>"))
return
self.oper_type[self.task_type]()
except subprocess.CalledProcessError as e:
LOG.warn(_("<<<ZenicShellExector::execute:Execute command failed! Reason:%s>>>" % e.output.strip()))
except Exception as e:
LOG.exception(_(e.message))
else:
LOG.info(_("<<<ZenicShellExector::execute:Execute command:%s,successful!>>>" % self.clush_cmd))

View File

@ -0,0 +1,135 @@
# -*- coding: utf-8 -*-
import os
import re
import commands
import types
import subprocess
from ConfigParser import ConfigParser
from daisy.common import exception
default_zenic_conf_template_path = "/var/lib/daisy/zenic/"
zenic_conf_template_path = default_zenic_conf_template_path
def update_conf(zenic, key, value):
zenic.set("general", key, value)
def get_conf(zenic_conf_file, **kwargs):
result = {}
if not kwargs:
return result
zenic = ConfigParser()
zenic.optionxform = str
zenic.read(zenic_conf_file)
result = {key : zenic.get("general", kwargs.get(key, None))
for key in kwargs.keys()
if zenic.has_option("general", kwargs.get(key, None))}
return result
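# Usage sketch (file and option names hypothetical):
#   get_conf('/var/lib/daisy/zenic/zenic.conf', vip='zamp_vip')
# returns {'vip': <value of zamp_vip>} when the option exists in the
# [general] section, and silently omits the key otherwise.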
def get_nodeid(deploy_ip,zbp_ips):
nodeid = 0
i = 0
for ip in zbp_ips:
if deploy_ip == ip:
break
else:
i=i+1
if i == 0:
nodeid = 1
elif i == 1:
nodeid = 256
else:
nodeid = i
return nodeid
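# Worked example (IPs hypothetical): with zbp_ips = ['10.0.0.1', '10.0.0.2',
# '10.0.0.3'], get_nodeid returns 1 for '10.0.0.1', 256 for '10.0.0.2' and 2
# for '10.0.0.3' -- matching the '1,256,...' zbpnodelist built below.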
def update_zenic_conf(config_data, cluster_conf_path):
print "zenic config data is:"
import pprint
pprint.pprint(config_data)
daisy_zenic_path = zenic_conf_template_path
zenic_conf_template_file = os.path.join(daisy_zenic_path, "zenic.conf")
if not os.path.exists(cluster_conf_path):
os.makedirs(cluster_conf_path)
zenic = ConfigParser()
zenic.optionxform = str
zenic.read(zenic_conf_template_file)
zbpips = ''
for ip in config_data['zbp_ips']:
if not zbpips:
zbpips = ip
else:
zbpips = zbpips + ',' + ip
update_conf(zenic, 'zbpips', zbpips)
update_conf(zenic, 'zbp_node_num', config_data['zbp_node_num'])
nodelist = '1,256'
if len(config_data['zbp_ips']) > 2:
for i in range(2,len(config_data['zbp_ips'])):
            nodelist = nodelist + ',' + str(i)
update_conf(zenic, 'zbpnodelist',nodelist)
zampips = ''
for ip in config_data['zamp_ips']:
if not zampips:
zampips = ip
else:
zampips = zampips + ',' + ip
update_conf(zenic, 'zampips', zampips)
update_conf(zenic, 'zamp_node_num', config_data['zamp_node_num'])
mongodbips = ''
for ip in config_data['mongodb_ips']:
if not mongodbips:
mongodbips = ip
else:
mongodbips = mongodbips + ',' + ip
update_conf(zenic, 'mongodbips', mongodbips)
update_conf(zenic, 'mongodb_node_num', config_data['mongodb_node_num'])
update_conf(zenic, 'zamp_vip', config_data['zamp_vip'])
update_conf(zenic, 'mongodb_vip', config_data['mongodb_vip'])
deploy_hosts = config_data['deploy_hosts']
for deploy_host in deploy_hosts:
nodeip = deploy_host['nodeip']
hostname = deploy_host['hostname']
MacName = deploy_host['MacName']
memmode = deploy_host['memmode']
update_conf(zenic,'nodeip',nodeip)
update_conf(zenic,'hostname',hostname)
update_conf(zenic,'MacName',MacName)
update_conf(zenic,'memmode',memmode)
nodeid = get_nodeid(nodeip,config_data['zbp_ips'])
update_conf(zenic,'nodeid',nodeid)
if nodeip in config_data['zamp_ips']:
update_conf(zenic,'needzamp','y')
else:
update_conf(zenic,'needzamp','n')
zenic_conf = "%s_zenic.conf" % deploy_host['mgtip']
zenic_conf_cluster_out = os.path.join(cluster_conf_path, zenic_conf)
zenic_conf_out = os.path.join(daisy_zenic_path, zenic_conf)
zenic.write(open(zenic_conf_cluster_out, "w+"))
with open(zenic_conf_cluster_out,'r') as fr,open(zenic_conf_out,'w') as fw:
for line in fr.readlines():
fw.write(line.replace(' ', ''))
return
def test():
print("Hello, world!")

View File

@ -0,0 +1,450 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/install endpoint for zenic API
"""
import os
import copy
import subprocess
import time
import traceback
import webob.exc
from oslo_config import cfg
from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPForbidden
from webob.exc import HTTPServerError
from threading import Thread, Lock
import threading
from daisy import i18n
from daisy import notifier
from daisy.api import policy
import daisy.api.v1
from daisy.common import exception
import daisy.registry.client.v1.api as registry
from daisy.api.backends.zenic import config
from daisy.api.backends import driver
from daisy.api.network_api import network as neutron
from ironicclient import client as ironic_client
import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.zenic.common as zenic_cmn
try:
import simplejson as json
except ImportError:
import json
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS
SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS
ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE
CONF = cfg.CONF
install_opts = [
    cfg.IntOpt('max_parallel_os_number', default=10,
               help='Maximum number of hosts installing an OS at the same time.'),
]
CONF.register_opts(install_opts)
CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format')
CONF.import_opt('container_formats', 'daisy.common.config',
group='image_format')
CONF.import_opt('image_property_quota', 'daisy.common.config')
host_os_status = {
'INIT' : 'init',
'INSTALLING' : 'installing',
'ACTIVE' : 'active',
'FAILED': 'install-failed'
}
zenic_state = zenic_cmn.ZENIC_STATE
daisy_zenic_path = zenic_cmn.daisy_zenic_path
install_zenic_progress=0.0
install_mutex = threading.Lock()
def update_progress_to_db(req, role_id_list, status, progress_percentage_step=0.0):
"""
    Write install progress and status to db; we use the global lock object
    'install_mutex' to make sure this function is thread safe.
    :param req: http req.
    :param role_id_list: Columns that need to be updated in the role table.
:param status: install status.
:return:
"""
global install_mutex
global install_zenic_progress
install_mutex.acquire(True)
install_zenic_progress += progress_percentage_step
role = {}
for role_id in role_id_list:
if 0 == cmp(status, zenic_state['INSTALLING']):
role['status'] = status
role['progress'] = install_zenic_progress
if 0 == cmp(status, zenic_state['INSTALL_FAILED']):
role['status'] = status
elif 0 == cmp(status, zenic_state['ACTIVE']):
role['status'] = status
role['progress'] = 100
daisy_cmn.update_role(req, role_id, role)
install_mutex.release()
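# Progress arithmetic note: each host that starts successfully adds
# round(1.0/len(hosts_list), 2)*100 percentage points via the callers below,
# e.g. with 3 hosts each contributes ~33.0 points.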
def _ping_hosts_test(ips):
ping_cmd = 'fping'
for ip in set(ips):
ping_cmd = ping_cmd + ' ' + ip
obj = subprocess.Popen(ping_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdoutput, erroutput) = obj.communicate()
_returncode = obj.returncode
if _returncode == 0 or _returncode == 1:
ping_result = stdoutput.split('\n')
unreachable_hosts = [result.split()[0] for result in ping_result if result and result.split()[2] != 'alive']
else:
msg = "ping failed beaceuse there is invlid ip in %s" % ips
raise exception.InvalidIP(msg)
return unreachable_hosts
def _check_ping_hosts(ping_ips, max_ping_times):
if not ping_ips:
LOG.info(_("no ip got for ping test"))
return ping_ips
ping_count = 0
time_step = 5
LOG.info(_("begin ping test for %s" % ','.join(ping_ips)))
while True:
if ping_count == 0:
ips = _ping_hosts_test(ping_ips)
else:
ips = _ping_hosts_test(ips)
ping_count += 1
if ips:
LOG.debug(_("ping host %s for %s times" % (','.join(ips), ping_count)))
if ping_count >= max_ping_times:
LOG.info(_("ping host %s timeout for %ss" % (','.join(ips), ping_count*time_step)))
return ips
time.sleep(time_step)
else:
LOG.info(_("ping host %s success" % ','.join(ping_ips)))
time.sleep(120)
LOG.info(_("120s after ping host %s success" % ','.join(ping_ips)))
return ips
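# Timing note: unreachable hosts are re-pinged every time_step (5s), so the
# overall wait is roughly max_ping_times * 5 seconds; ZENICInstallTask below
# passes ping_times = 36, i.e. about three minutes.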
def _get_host_private_networks(host_detail, cluster_private_networks_name):
host_private_networks = [hi for pn in cluster_private_networks_name
for hi in host_detail['interfaces'] if pn in hi['assigned_networks']]
    # If the port type is bond, use the member ports' pci segments as the bond port's pci1 & pci2 segments
for interface_outer in host_private_networks:
if 0 != cmp(interface_outer.get('type', None), "bond"):
continue
slave1 = interface_outer.get('slave1', None)
slave2 = interface_outer.get('slave2', None)
if not slave1 or not slave2:
continue
interface_outer.pop('pci')
for interface_inner in host_detail['interfaces']:
if 0 == cmp(interface_inner.get('name', None), slave1):
interface_outer['pci1'] = interface_inner['pci']
elif 0 == cmp(interface_inner.get('name', None), slave2):
interface_outer['pci2'] = interface_inner['pci']
return host_private_networks
def get_cluster_zenic_config(req, cluster_id):
LOG.info(_("get zenic config from database..."))
params = dict(limit=1000000)
zenic_config = {}
deploy_hosts = []
deploy_host_cfg = {}
mgt_ip = ''
zbp_ip_list = set()
mgt_ip_list = set()
zamp_ip_list = set()
zamp_vip = ''
mongodb_ip_list = set()
mongodb_vip= ''
cluster_networks = daisy_cmn.get_cluster_networks_detail(req, cluster_id)
all_roles = zenic_cmn.get_roles_detail(req)
roles = [role for role in all_roles if (role['cluster_id'] == cluster_id and role['deployment_backend'] == daisy_cmn.zenic_backend_name)]
for role in roles:
if not (role['name'] == 'ZENIC_CTL' or role['name'] == 'ZENIC_NFM'):
continue
if role['name'] == 'ZENIC_NFM':
if not zamp_vip:
zamp_vip = role['vip']
if not mongodb_vip:
mongodb_vip = role['mongodb_vip']
role_hosts = zenic_cmn.get_hosts_of_role(req, role['id'])
for role_host in role_hosts:
mgt_ip = ''
for deploy_host in deploy_hosts:
if role_host['host_id'] == deploy_host['hostid']:
mgt_ip = deploy_host['mgtip']
deploy_ip = deploy_host['nodeip']
break
if not mgt_ip:
host_detail = zenic_cmn.get_host_detail(req, role_host['host_id'])
deploy_host_cfg = zenic_cmn.get_deploy_node_cfg(req, host_detail, cluster_networks)
deploy_hosts.append(deploy_host_cfg)
mgt_ip = deploy_host_cfg['mgtip']
deploy_ip = deploy_host_cfg['nodeip']
mgt_ip_list.add(mgt_ip)
if role['name'] == 'ZENIC_CTL':
zbp_ip_list.add(deploy_ip)
elif role['name'] == 'ZENIC_NFM':
zamp_ip_list.add(deploy_ip)
mongodb_ip_list.add(deploy_ip)
else:
LOG.warn(_("<<<Zenic Install role %s is invalid >>>" % role['name']))
zenic_config.update({'deploy_hosts':deploy_hosts})
zenic_config.update({'zbp_ips':zbp_ip_list})
zenic_config.update({'zbp_node_num':len(zbp_ip_list)})
zenic_config.update({'zamp_ips':zamp_ip_list})
zenic_config.update({'zamp_node_num':len(zamp_ip_list)})
zenic_config.update({'mongodb_ips':mongodb_ip_list})
zenic_config.update({'mongodb_node_num':len(mongodb_ip_list)})
zenic_config.update({'zamp_vip':zamp_vip})
zenic_config.update({'mongodb_vip':mongodb_vip})
return (zenic_config, mgt_ip_list)
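# Shape of the returned zenic_config (layout hypothetical):
#   {'deploy_hosts': [...], 'zbp_ips': set(['10.0.0.1', '10.0.0.2']),
#    'zbp_node_num': 2, 'zamp_ips': set(['10.0.0.3']), 'zamp_node_num': 1,
#    'mongodb_ips': set(['10.0.0.3']), 'mongodb_node_num': 1,
#    'zamp_vip': '10.0.0.100', 'mongodb_vip': '10.0.0.101'}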
def generate_zenic_config_file(cluster_id, zenic_config):
LOG.info(_("generate zenic config..."))
if zenic_config:
cluster_conf_path = daisy_zenic_path + cluster_id
config.update_zenic_conf(zenic_config, cluster_conf_path)
def thread_bin(req,host, role_id_list, pkg_name, install_progress_percentage):
host_ip = host['mgtip']
password = host['rootpwd']
cmd = 'mkdir -p /var/log/daisy/daisy_install/'
daisy_cmn.subprocess_call(cmd)
var_log_path = "/var/log/daisy/daisy_install/%s_install_zenic.log" % host_ip
with open(var_log_path, "w+") as fp:
cmd = '/var/lib/daisy/zenic/trustme.sh %s %s' % (host_ip, password)
daisy_cmn.subprocess_call(cmd,fp)
cmd = 'clush -S -b -w %s mkdir -p /home/workspace' % (host_ip,)
daisy_cmn.subprocess_call(cmd,fp)
cmd = 'clush -S -b -w %s mkdir -p /etc/zenic' % (host_ip,)
daisy_cmn.subprocess_call(cmd,fp)
cmd = 'clush -S -b -w %s rm -rf /etc/zenic/config' % (host_ip,)
daisy_cmn.subprocess_call(cmd,fp)
cmd = 'clush -S -b -w %s rm -rf /home/zenic' % (host_ip,)
daisy_cmn.subprocess_call(cmd,fp)
cmd = 'clush -S -b -w %s rm -rf /home/workspace/unipack' % (host_ip,)
daisy_cmn.subprocess_call(cmd,fp)
pkg_file = daisy_zenic_path + pkg_name
cmd = 'clush -S -b -w %s rm -rf /home/workspace/%s' % (host_ip,pkg_name)
daisy_cmn.subprocess_call(cmd,fp)
cfg_file = daisy_zenic_path + host_ip + "_zenic.conf"
try:
exc_result = subprocess.check_output(
'sshpass -p ossdbg1 scp %s root@%s:/etc/zenic/config' % (cfg_file,host_ip,),
shell=True, stderr=fp)
except subprocess.CalledProcessError as e:
update_progress_to_db(req, role_id_list, zenic_state['INSTALL_FAILED'])
LOG.info(_("scp zenic pkg for %s failed!" % host_ip))
fp.write(e.output.strip())
exit()
else:
LOG.info(_("scp zenic config for %s successfully!" % host_ip))
fp.write(exc_result)
try:
exc_result = subprocess.check_output(
'sshpass -p ossdbg1 scp %s root@%s:/home/workspace/' % (pkg_file,host_ip,),
shell=True, stderr=fp)
except subprocess.CalledProcessError as e:
update_progress_to_db(req, role_id_list, zenic_state['INSTALL_FAILED'])
LOG.info(_("scp zenic pkg for %s failed!" % host_ip))
fp.write(e.output.strip())
exit()
else:
LOG.info(_("scp zenic pkg for %s successfully!" % host_ip))
fp.write(exc_result)
cmd = 'clush -S -b -w %s unzip /home/workspace/%s -d /home/workspace/unipack' % (host_ip,pkg_name,)
daisy_cmn.subprocess_call(cmd)
try:
exc_result = subprocess.check_output(
'clush -S -b -w %s /home/workspace/unipack/node_install.sh' % (host_ip,),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
update_progress_to_db(req, role_id_list, zenic_state['INSTALL_FAILED'])
LOG.info(_("install zenic for %s failed!" % host_ip))
fp.write(e.output.strip())
exit()
else:
LOG.info(_("install zenic for %s successfully!" % host_ip))
fp.write(exc_result)
try:
exc_result = subprocess.check_output(
'clush -S -b -w %s /home/zenic/node_start.sh' % (host_ip,),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
update_progress_to_db(req, role_id_list, zenic_state['INSTALL_FAILED'])
LOG.info(_("start zenic for %s failed!" % host_ip))
fp.write(e.output.strip())
exit()
else:
update_progress_to_db(req, role_id_list, zenic_state['INSTALLING'], install_progress_percentage)
LOG.info(_("start zenic for %s successfully!" % host_ip))
fp.write(exc_result)
class ZENICInstallTask(Thread):
"""
    Class for installing the zenic bin.
"""
""" Definition for install states."""
INSTALL_STATES = {
'INIT' : 'init',
'INSTALLING' : 'installing',
'ACTIVE' : 'active',
'FAILED': 'install-failed'
}
def __init__(self, req, cluster_id):
super(ZENICInstallTask, self).__init__()
self.req = req
self.cluster_id = cluster_id
self.progress = 0
self.state = ZENICInstallTask.INSTALL_STATES['INIT']
self.message = ""
self.zenic_config_file = ''
self.mgt_ip_list = ''
self.install_log_fp = None
self.last_line_num = 0
self.need_install = False
self.ping_times = 36
self.log_file = "/var/log/daisy/zenic_%s_install.log" % self.cluster_id
def run(self):
try:
self._run()
except (exception.InstallException,
exception.NotFound,
exception.InstallTimeoutException) as e:
LOG.exception(e.message)
else:
if not self.need_install:
return
self.progress = 100
self.state = zenic_state['ACTIVE']
self.message = "Zenic install successfully"
LOG.info(_("install Zenic for cluster %s successfully."
% self.cluster_id))
def _run(self):
(zenic_config, self.mgt_ip_list) = get_cluster_zenic_config(self.req, self.cluster_id)
if not self.mgt_ip_list:
msg = _("there is no host in cluster %s") % self.cluster_id
raise exception.ThreadBinException(msg)
unreached_hosts = _check_ping_hosts(self.mgt_ip_list, self.ping_times)
if unreached_hosts:
self.state = zenic_state['INSTALL_FAILED']
self.message = "hosts %s ping failed" % unreached_hosts
raise exception.NotFound(message=self.message)
generate_zenic_config_file(self.cluster_id, zenic_config)
# check and get ZENIC version
(zenic_version_pkg_file,zenic_version_pkg_name) = zenic_cmn.check_and_get_zenic_version(daisy_zenic_path)
if not zenic_version_pkg_file:
self.state = zenic_state['INSTALL_FAILED']
self.message = "ZENIC version file not found in %s" % daisy_zenic_path
raise exception.NotFound(message=self.message)
(role_id_list, hosts_list) = zenic_cmn.get_roles_and_hosts_list(self.req, self.cluster_id)
update_progress_to_db(self.req, role_id_list, zenic_state['INSTALLING'], 0.0)
install_progress_percentage = round(1*1.0/len(hosts_list), 2)*100
threads = []
for host in hosts_list:
t = threading.Thread(target=thread_bin,args=(self.req,host,role_id_list,zenic_version_pkg_name,install_progress_percentage))
t.setDaemon(True)
t.start()
threads.append(t)
LOG.info(_("install threads have started, please waiting...."))
try:
for t in threads:
t.join()
except:
LOG.warn(_("Join install thread %s failed!" % t))
else:
install_failed_flag = False
for role_id in role_id_list:
role = daisy_cmn.get_role_detail(self.req, role_id)
if role['progress'] == 0:
update_progress_to_db(self.req, role_id_list, zenic_state['INSTALL_FAILED'])
install_failed_flag = True
break
if role['status'] == zenic_state['INSTALL_FAILED']:
install_failed_flag = True
break
if not install_failed_flag:
LOG.info(_("all install threads have done, set all roles status to 'active'!"))
update_progress_to_db(self.req, role_id_list, zenic_state['ACTIVE'])

View File

@ -0,0 +1,106 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/hosts endpoint for Daisy v1 API
"""
import os
import webob.exc
import subprocess
from oslo_config import cfg
from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPForbidden
from threading import Thread, Lock
import threading
from daisy import i18n
from daisy import notifier
from daisy.api import policy
import daisy.api.v1
from daisy.common import exception
from daisy.common import property_utils
from daisy.common import utils
from daisy.common import wsgi
from daisy.api.v1 import controller
from daisy.api.v1 import filters
from daisy.api.backends.zenic.common import ZenicShellExector
import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.zenic.common as zenic_cmn
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
zenic_state = zenic_cmn.ZENIC_STATE
uninstall_zenic_progress=100.0
uninstall_mutex = threading.Lock()
def update_progress_to_db(req, role_id_list, status, progress_percentage_step=0.0):
"""
    Write uninstall progress and status to db; we use the global lock object
    'uninstall_mutex' to make sure this function is thread safe.
    :param req: http req.
    :param role_id_list: Columns that need to be updated in the role table.
:param status: Uninstall status.
:return:
"""
global uninstall_mutex
global uninstall_zenic_progress
uninstall_mutex.acquire(True)
uninstall_zenic_progress -= progress_percentage_step
role = {}
for role_id in role_id_list:
if 0 == cmp(status, zenic_state['UNINSTALLING']):
role['status'] = status
role['progress'] = uninstall_zenic_progress
if 0 == cmp(status, zenic_state['UNINSTALL_FAILED']):
role['status'] = status
elif 0 == cmp(status, zenic_state['INIT']):
role['status'] = status
role['progress'] = 0
daisy_cmn.update_role(req, role_id, role)
uninstall_mutex.release()
def thread_bin(req, host, role_id_list,uninstall_progress_percentage):
host_ip = host['mgtip']
password = host['rootpwd']
cmd = 'mkdir -p /var/log/daisy/daisy_uninstall/'
daisy_cmn.subprocess_call(cmd)
var_log_path = "/var/log/daisy/daisy_uninstall/%s_uninstall_zenic.log" % host_ip
with open(var_log_path, "w+") as fp:
cmd = '/var/lib/daisy/zenic/trustme.sh %s %s' % (host_ip, password)
daisy_cmn.subprocess_call(cmd,fp)
try:
exc_result = subprocess.check_output(
'clush -S -b -w %s /home/zenic/node_stop.sh' % (host_ip,),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
update_progress_to_db(req, role_id_list, zenic_state['UNINSTALL_FAILED'])
fp.write(e.output.strip())
else:
update_progress_to_db(req, role_id_list, zenic_state['UNINSTALLING'], uninstall_progress_percentage)
fp.write(exc_result)

View File

@ -0,0 +1,158 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/update endpoint for Daisy v1 API
"""
import os
import webob.exc
import subprocess
from oslo_config import cfg
from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPForbidden
from threading import Thread, Lock
import threading
from daisy import i18n
from daisy import notifier
from daisy.api import policy
import daisy.api.v1
from daisy.common import exception
from daisy.common import property_utils
from daisy.common import utils
from daisy.common import wsgi
from daisy.api.v1 import controller
from daisy.api.v1 import filters
from daisy.api.backends.zenic.common import ZenicShellExector
import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.zenic.common as zenic_cmn
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
zenic_state = zenic_cmn.ZENIC_STATE
daisy_zenic_path = zenic_cmn.daisy_zenic_path
update_zenic_progress = 0.0
update_mutex = threading.Lock()
def update_progress_to_db(req, role_id_list, status, progress_percentage_step=0.0):
"""
Write update progress and status to the db; the global lock object
'update_mutex' keeps this function thread-safe.
:param req: http req.
:param role_id_list: Role ids whose rows need updating in the role table.
:param status: Update status.
:return:
"""
global update_mutex
global update_zenic_progress
update_mutex.acquire(True)
update_zenic_progress += progress_percentage_step
role = {}
for role_id in role_id_list:
if 0 == cmp(status, zenic_state['UPDATING']):
role['status'] = status
role['progress'] = update_zenic_progress
elif 0 == cmp(status, zenic_state['UPDATE_FAILED']):
role['status'] = status
elif 0 == cmp(status, zenic_state['ACTIVE']):
role['status'] = status
role['progress'] = 100
daisy_cmn.update_role(req, role_id, role)
update_mutex.release()
def thread_bin(req, host, role_id_list, update_progress_percentage):
(zenic_version_pkg_file, zenic_version_pkg_name) = zenic_cmn.check_and_get_zenic_version(daisy_zenic_path)
if not zenic_version_pkg_file:
# 'self' is undefined in this module-level function; mark the role
# failed and raise with a local message instead.
update_progress_to_db(req, role_id_list, zenic_state['UPDATE_FAILED'])
message = "ZENIC version file not found in %s" % daisy_zenic_path
raise exception.NotFound(message=message)
host_ip = host['mgtip']
password = host['rootpwd']
cmd = 'mkdir -p /var/log/daisy/daisy_upgrade/'
daisy_cmn.subprocess_call(cmd)
var_log_path = "/var/log/daisy/daisy_upgrade/%s_upgrade_zenic.log" % host_ip
with open(var_log_path, "w+") as fp:
cmd = '/var/lib/daisy/zenic/trustme.sh %s %s' % (host_ip, password)
daisy_cmn.subprocess_call(cmd,fp)
cmd = 'clush -S -b -w %s /home/zenic/node_stop.sh' % (host_ip,)
daisy_cmn.subprocess_call(cmd,fp)
cmd = 'clush -S -b -w %s rm -rf /home/workspace/%s' % (host_ip,zenic_version_pkg_name)
daisy_cmn.subprocess_call(cmd,fp)
cmd = 'clush -S -b -w %s rm -rf /home/workspace/unipack' % (host_ip,)
daisy_cmn.subprocess_call(cmd,fp)
try:
exc_result = subprocess.check_output(
'sshpass -p ossdbg1 scp %s root@%s:/home/workspace/' % (zenic_version_pkg_file,host_ip,),
shell=True, stderr=fp)
except subprocess.CalledProcessError as e:
update_progress_to_db(req, role_id_list, zenic_state['UPDATE_FAILED'])
LOG.error(_("scp zenic pkg for %s failed!" % host_ip))
fp.write(e.output.strip())
# exit() would raise SystemExit inside this worker thread; return instead.
return
else:
LOG.info(_("scp zenic pkg for %s successfully!" % host_ip))
fp.write(exc_result)
cmd = 'clush -S -b -w %s unzip /home/workspace/%s -d /home/workspace/unipack' % (host_ip,zenic_version_pkg_name,)
daisy_cmn.subprocess_call(cmd, fp)
try:
exc_result = subprocess.check_output(
'clush -S -b -w %s /home/workspace/unipack/node_upgrade.sh' % (host_ip,),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
update_progress_to_db(req, role_id_list, zenic_state['UPDATE_FAILED'])
LOG.info(_("Upgrade zenic for %s failed!" % host_ip))
fp.write(e.output.strip())
else:
update_progress_to_db(req, role_id_list, zenic_state['UPDATING'], update_progress_percentage)
LOG.info(_("Upgrade zenic for %s successfully!" % host_ip))
fp.write(exc_result)
try:
exc_result = subprocess.check_output(
'clush -S -b -w %s /home/zenic/node_start.sh' % (host_ip,),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
update_progress_to_db(req, role_id_list, zenic_state['UPDATE_FAILED'])
LOG.info(_("Start zenic for %s failed!" % host_ip))
fp.write(e.output.strip())
else:
update_progress_to_db(req, role_id_list, zenic_state['UPDATING'], update_progress_percentage)
LOG.info(_("Start zenic for %s successfully!" % host_ip))
fp.write(exc_result)
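
The scp/unzip/upgrade/start steps above all repeat one pattern: run a shell command, write its output to the per-host log, and mark the role failed when it exits non-zero. A hedged refactoring sketch of that pattern (the helper name _run_step is an assumption, not part of this file):

import subprocess

def _run_step(req, role_id_list, fp, cmd, fail_state):
    # Run one shell step; on failure record fail_state for the role,
    # log the output, and report False so the caller can stop early.
    try:
        out = subprocess.check_output(cmd, shell=True,
                                      stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        update_progress_to_db(req, role_id_list, fail_state)
        fp.write(e.output.strip())
        return False
    fp.write(out)
    return True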


@ -0,0 +1,125 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Controller for Image Cache Management API
"""
import webob.exc
from daisy.api import policy
from daisy.api.v1 import controller
from daisy.common import exception
from daisy.common import wsgi
from daisy import image_cache
class Controller(controller.BaseController):
"""
A controller for managing cached images.
"""
def __init__(self):
self.cache = image_cache.ImageCache()
self.policy = policy.Enforcer()
def _enforce(self, req):
"""Authorize request against 'manage_image_cache' policy"""
try:
self.policy.enforce(req.context, 'manage_image_cache', {})
except exception.Forbidden:
raise webob.exc.HTTPForbidden()
def get_cached_images(self, req):
"""
GET /cached_images
Returns a mapping of records about cached images.
"""
self._enforce(req)
images = self.cache.get_cached_images()
return dict(cached_images=images)
def delete_cached_image(self, req, image_id):
"""
DELETE /cached_images/<IMAGE_ID>
Removes an image from the cache.
"""
self._enforce(req)
self.cache.delete_cached_image(image_id)
def delete_cached_images(self, req):
"""
DELETE /cached_images - Clear all active cached images
Removes all images from the cache.
"""
self._enforce(req)
return dict(num_deleted=self.cache.delete_all_cached_images())
def get_queued_images(self, req):
"""
GET /queued_images
Returns a mapping of records about queued images.
"""
self._enforce(req)
images = self.cache.get_queued_images()
return dict(queued_images=images)
def queue_image(self, req, image_id):
"""
PUT /queued_images/<IMAGE_ID>
Queues an image for caching. We do not check to see if
the image is in the registry here. That is done by the
prefetcher...
"""
self._enforce(req)
self.cache.queue_image(image_id)
def delete_queued_image(self, req, image_id):
"""
DELETE /queued_images/<IMAGE_ID>
Removes an image from the cache.
"""
self._enforce(req)
self.cache.delete_queued_image(image_id)
def delete_queued_images(self, req):
"""
DELETE /queued_images - Clear all active queued images
Removes all images from the cache.
"""
self._enforce(req)
return dict(num_deleted=self.cache.delete_all_queued_images())
class CachedImageDeserializer(wsgi.JSONRequestDeserializer):
pass
class CachedImageSerializer(wsgi.JSONResponseSerializer):
pass
def create_resource():
"""Cached Images resource factory method"""
deserializer = CachedImageDeserializer()
serializer = CachedImageSerializer()
return wsgi.Resource(Controller(), deserializer, serializer)
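
For reference, a small client-side sketch of the endpoints this controller serves; the base URL, port and auth header are assumptions (the route wiring itself is in the cache-manage middleware later in this commit):

import requests  # any HTTP client works; requests is assumed here

BASE = 'http://127.0.0.1:9292'        # assumed API endpoint
HDRS = {'X-Auth-Token': '<token>'}    # assumed auth header

cached = requests.get(BASE + '/v1/cached_images', headers=HDRS).json()
requests.put(BASE + '/v1/queued_images/<IMAGE_ID>', headers=HDRS)     # queue
requests.delete(BASE + '/v1/cached_images/<IMAGE_ID>', headers=HDRS)  # evict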

code/daisy/daisy/api/common.py Executable file

@ -0,0 +1,220 @@
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
from daisy.common import exception
from daisy.common import wsgi
from daisy import i18n
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
CONF = cfg.CONF
_CACHED_THREAD_POOL = {}
def size_checked_iter(response, image_meta, expected_size, image_iter,
notifier):
image_id = image_meta['id']
bytes_written = 0
def notify_image_sent_hook(env):
image_send_notification(bytes_written, expected_size,
image_meta, response.request, notifier)
# Add hook to process after response is fully sent
if 'eventlet.posthooks' in response.request.environ:
response.request.environ['eventlet.posthooks'].append(
(notify_image_sent_hook, (), {}))
try:
for chunk in image_iter:
yield chunk
bytes_written += len(chunk)
except Exception as err:
with excutils.save_and_reraise_exception():
msg = (_LE("An error occurred reading from backend storage for "
"image %(image_id)s: %(err)s") % {'image_id': image_id,
'err': err})
LOG.error(msg)
if expected_size != bytes_written:
msg = (_LE("Backend storage for image %(image_id)s "
"disconnected after writing only %(bytes_written)d "
"bytes") % {'image_id': image_id,
'bytes_written': bytes_written})
LOG.error(msg)
raise exception.DaisyException(_("Corrupt image download for "
"image %(image_id)s") %
{'image_id': image_id})
def image_send_notification(bytes_written, expected_size, image_meta, request,
notifier):
"""Send an image.send message to the notifier."""
try:
context = request.context
payload = {
'bytes_sent': bytes_written,
'image_id': image_meta['id'],
'owner_id': image_meta['owner'],
'receiver_tenant_id': context.tenant,
'receiver_user_id': context.user,
'destination_ip': request.remote_addr,
}
if bytes_written != expected_size:
notify = notifier.error
else:
notify = notifier.info
notify('image.send', payload)
except Exception as err:
msg = (_LE("An error occurred during image.send"
" notification: %(err)s") % {'err': err})
LOG.error(msg)
def get_remaining_quota(context, db_api, image_id=None):
"""Method called to see if the user is allowed to store an image.
Checks if it is allowed based on the given size in glance based on their
quota and current usage.
:param context:
:param db_api: The db_api in use for this configuration
:param image_id: The image that will be replaced with this new data size
:return: The number of bytes the user has remaining under their quota.
None means infinity
"""
# NOTE(jbresnah) in the future this value will come from a call to
# keystone.
users_quota = CONF.user_storage_quota
# set quota must have a number optionally followed by B, KB, MB,
# GB or TB without any spaces in between
pattern = re.compile(r'^(\d+)((K|M|G|T)?B)?$')
match = pattern.match(users_quota)
if not match:
LOG.error(_LE("Invalid value for option user_storage_quota: "
"%(users_quota)s")
% {'users_quota': users_quota})
raise exception.InvalidOptionValue(option='user_storage_quota',
value=users_quota)
quota_value, quota_unit = (match.groups())[0:2]
# fall back to Bytes if user specified anything other than
# permitted values
quota_unit = quota_unit or "B"
factor = getattr(units, quota_unit.replace('B', 'i'), 1)
users_quota = int(quota_value) * factor
if users_quota <= 0:
return
usage = db_api.user_get_storage_usage(context,
context.owner,
image_id=image_id)
return users_quota - usage
def check_quota(context, image_size, db_api, image_id=None):
"""Method called to see if the user is allowed to store an image.
Checks if it is allowed based on the given size in glance based on their
quota and current usage.
:param context:
:param image_size: The size of the image we hope to store
:param db_api: The db_api in use for this configuration
:param image_id: The image that will be replaced with this new data size
:return:
"""
remaining = get_remaining_quota(context, db_api, image_id=image_id)
if remaining is None:
return
user = getattr(context, 'user', '<unknown>')
if image_size is None:
# NOTE(jbresnah) When the image size is None it means that it is
# not known. In this case the only time we will raise an
# exception is when there is no room left at all, thus we know
# it will not fit
if remaining <= 0:
LOG.warn(_LW("User %(user)s attempted to upload an image of"
" unknown size that will exceed the quota."
" %(remaining)d bytes remaining.")
% {'user': user, 'remaining': remaining})
raise exception.StorageQuotaFull(image_size=image_size,
remaining=remaining)
return
if image_size > remaining:
LOG.warn(_LW("User %(user)s attempted to upload an image of size"
" %(size)d that will exceed the quota. %(remaining)d"
" bytes remaining.")
% {'user': user, 'size': image_size, 'remaining': remaining})
raise exception.StorageQuotaFull(image_size=image_size,
remaining=remaining)
return remaining
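# NOTE: memoize() below evaluates memoizer(lock_name) at decoration time,
# so the decorated name ends up bound to the cached value itself (here: the
# eventlet pool) rather than to a callable; get_thread_pool() relies on
# that behaviour when it returns _get_thread_pool.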
def memoize(lock_name):
def memoizer_wrapper(func):
@lockutils.synchronized(lock_name)
def memoizer(lock_name):
if lock_name not in _CACHED_THREAD_POOL:
_CACHED_THREAD_POOL[lock_name] = func()
return _CACHED_THREAD_POOL[lock_name]
return memoizer(lock_name)
return memoizer_wrapper
def get_thread_pool(lock_name, size=1024):
"""Initializes eventlet thread pool.
If thread pool is present in cache, then returns it from cache
else create new pool, stores it in cache and return newly created
pool.
@param lock_name: Name of the lock.
@param size: Size of eventlet pool.
@return: eventlet pool
"""
@memoize(lock_name)
def _get_thread_pool():
return wsgi.get_asynchronous_eventlet_pool(size=size)
return _get_thread_pool
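
The quota-string parse used by get_remaining_quota above can be exercised in isolation; a minimal sketch replicating the same regex and unit handling (the helper name parse_quota is an assumption):

import re
from oslo_utils import units

def parse_quota(value):
    # Mirror get_remaining_quota(): number plus optional B/KB/MB/GB/TB.
    match = re.match(r'^(\d+)((K|M|G|T)?B)?$', value)
    if not match:
        raise ValueError('bad quota string: %s' % value)
    quota_value, quota_unit = match.groups()[0:2]
    quota_unit = quota_unit or 'B'
    factor = getattr(units, quota_unit.replace('B', 'i'), 1)
    return int(quota_value) * factor

assert parse_quota('10GB') == 10 * units.Gi
assert parse_quota('512') == 512      # bare numbers fall back to bytes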

@ -0,0 +1,143 @@
import subprocess
import daisy.registry.client.v1.api as registry
from daisy.api.backends.tecs import config
from oslo_log import log as logging
import webob.exc
LOG = logging.getLogger(__name__)
CONFIG_MAP = {
'cinder_config': '/etc/cinder/cinder.conf',
'cinder_api_paste_ini': '/etc/cinder/api-paste.ini',
'glance_api_config': '/etc/glance/glance-api.conf',
'glance_api_paste_ini': '/etc/glance/glance-api-paste.ini',
}
class config_clushshell():
""" Class for clush backend."""
def __init__(self, req, role_id):
if not req or not role_id:
LOG.error("<<<config_clushshell:push_config input params are invalid.>>>")
return
self.context = req.context
self.role_id = role_id
self.CLUSH_CMD = "clush -S -w %(management_ip)s \"%(sub_command)s\""
self.SUB_COMMAND = "openstack-config --set %(config_file)s %(section)s %(key)s %(value)s"
def _openstack_set_config(self, host_ip, config_set):
"""
Set all config items on one host
:param host_ip:
:param config_set:
:return:
"""
if not host_ip or not config_set:
LOG.debug('<<<FUN:_openstack_set_config input params invalid.>>>')
return
sub_command_by_one_host = []
for config in config_set['config']:
if config['config_version'] == config['running_version']:
continue
config_file = registry.get_config_file_metadata(self.context, config['config_file_id'])
sub_command_by_one_host.append(
self.SUB_COMMAND % \
{'config_file': config_file['name'], 'section': config['section'],
'key': config['key'], 'value': config['value']})
try:
sub_command_by_one_host = ";".join(sub_command_by_one_host)
clush_cmd = self.CLUSH_CMD % {'management_ip':host_ip, 'sub_command':sub_command_by_one_host}
subprocess.check_output(clush_cmd, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
msg = ("<<<Host %s excute clush failed:%s!>>>" % (host_ip, e.output.strip()))
LOG.exception(msg)
raise webob.exc.HTTPServerError(explanation=msg)
else:
msg = ("<<<Host %s excute clush successful!>>>" % host_ip)
LOG.info(msg)
config['running_version'] = config['config_version']
def push_config(self):
"""
Push config to remote host.
:param req: http req
:param role_id: host role id
:return:
"""
self.role_info = registry.get_role_metadata(self.context, self.role_id)
if not self.role_info or not self.role_info.get('config_set_id'):
LOG.error("<<<config_clushshell:push_config,get_role_metadata failed.>>>")
return
config_set = registry.get_config_set_metadata(self.context, self.role_info['config_set_id'])
if not config_set or not config_set.has_key('config'):
LOG.info("<<<config_clushshell:push_config,get_config_set_metadata failed.>>>")
return
config_set['config'] = \
[config for config in config_set['config']
if config.has_key('config_version') and config.has_key('running_version')
and config['config_version'] != config['running_version']]
if not config_set['config']:
LOG.info('<<<No config need to be modified, within the scope of the hosts in role_id:%s.>>>' %
self.role_id)
return
self.role_hosts = registry.get_role_host_metadata(self.context, self.role_id)
current_count = 0
all_host_config_sets = []
for role_host in self.role_hosts:
host = registry.get_host_metadata(self.context, role_host['host_id'])
#change by 10166727--------start-------------
host_ip=[]
for interface in host['interfaces']:
find_flag=interface['ip'].find(':')
if find_flag<0:
host_ip=[interface['ip']]
else:
ip_list_tmp=interface['ip'].split(",")
for ip_list in ip_list_tmp:
if ip_list.split(':')[0] == "MANAGEMENT":
host_ip=[str(ip_list.split(':')[1])]
#change by 10166727--------end---------------
if not host_ip:
continue
host_ip = host_ip[0]
if 0 != subprocess.call('/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, 'ossdbg1'),
shell=True,
stderr=subprocess.STDOUT):
raise Exception("trustme.sh error!")
if not config_set.has_key("config"):
continue
self._openstack_set_config(host_ip, config_set)
all_host_config_sets.append(config_set)
registry.update_configs_metadata_by_role_hosts(self.context, all_host_config_sets)
LOG.debug("Update config for host:%s successfully!" % host_ip)
self._host_service_restart(host_ip)
current_count += 1
self.role_info['config_set_update_progress'] = round(current_count*1.0/len(self.role_hosts), 2)*100
registry.update_role_metadata(self.context, self.role_id, self.role_info)
def _host_service_restart(self,host_ip):
""" """
for service in self.role_info['service_name']:
for service_detail_name in config.service_map.get(service).split(','):
cmd = ""
if self.role_info['name'] == "CONTROLLER_HA":
cmd = "clush -S -w %s [ `systemctl is-active %s` != 'active' ] && systemctl restart %s" % \
(host_ip, service_detail_name, service_detail_name)
else:
cmd = "clush -S -w %s systemctl restart %s" % (host_ip, service_detail_name)
if 0 != subprocess.call(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE):
LOG.error("Service %s restart failed in host:%s." % (service_detail_name, host_ip))


@ -0,0 +1,16 @@
from daisy.api.configset.clush import config_clushshell
class configBackend():
def __init__(self, type, req, role_id):
self.type = type
self._instance = None
if type == "clushshell":
self._instance = config_clushshell(req, role_id)
elif type == "puppet":
pass
def push_config(self):
self._instance.push_config()
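
Callers are expected to go through this factory rather than instantiating config_clushshell directly; a hedged usage sketch (the import path is an assumption based on the clush import above, and req/role_id come from the API layer):

from daisy.api.configset.manager import configBackend  # path assumed

backend = configBackend('clushshell', req, role_id)
backend.push_config()  # push pending config items to every host in the role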

@ -0,0 +1,323 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Transparent image file caching middleware, designed to live on
Glance API nodes. When images are requested from the API node,
this middleware caches the returned image file to local filesystem.
When subsequent requests for the same image file are received,
the local cached copy of the image file is returned.
"""
import re
from oslo_log import log as logging
import webob
from daisy.api.common import size_checked_iter
from daisy.api import policy
from daisy.api.v1 import images
from daisy.common import exception
from daisy.common import utils
from daisy.common import wsgi
import daisy.db
from daisy import i18n
from daisy import image_cache
from daisy import notifier
import daisy.registry.client.v1.api as registry
LOG = logging.getLogger(__name__)
_LI = i18n._LI
_LE = i18n._LE
_LW = i18n._LW
PATTERNS = {
('v1', 'GET'): re.compile(r'^/v1/images/([^\/]+)$'),
('v1', 'DELETE'): re.compile(r'^/v1/images/([^\/]+)$'),
('v2', 'GET'): re.compile(r'^/v2/images/([^\/]+)/file$'),
('v2', 'DELETE'): re.compile(r'^/v2/images/([^\/]+)$')
}
class CacheFilter(wsgi.Middleware):
def __init__(self, app):
self.cache = image_cache.ImageCache()
self.serializer = images.ImageSerializer()
self.policy = policy.Enforcer()
LOG.info(_LI("Initialized image cache middleware"))
super(CacheFilter, self).__init__(app)
def _verify_metadata(self, image_meta):
"""
Sanity check the 'deleted' and 'size' metadata values.
"""
# NOTE: admins can see image metadata in the v1 API, but shouldn't
# be able to download the actual image data.
if image_meta['status'] == 'deleted' and image_meta['deleted']:
raise exception.NotFound()
if not image_meta['size']:
# override image size metadata with the actual cached
# file size, see LP Bug #900959
image_meta['size'] = self.cache.get_image_size(image_meta['id'])
@staticmethod
def _match_request(request):
"""Determine the version of the url and extract the image id
:returns: tuple of (version, method, image id) if the url is
cacheable, otherwise None
"""
for ((version, method), pattern) in PATTERNS.items():
if request.method != method:
continue
match = pattern.match(request.path_info)
if match is None:
continue
image_id = match.group(1)
# Ensure the image id we got looks like an image id to filter
# out a URI like /images/detail. See LP Bug #879136
if image_id != 'detail':
return (version, method, image_id)
def _enforce(self, req, action, target=None):
"""Authorize an action against our policies"""
if target is None:
target = {}
try:
self.policy.enforce(req.context, action, target)
except exception.Forbidden as e:
raise webob.exc.HTTPForbidden(explanation=e.msg, request=req)
def _get_v1_image_metadata(self, request, image_id):
"""
Retrieves image metadata using registry for v1 api and creates
dictionary-like mash-up of image core and custom properties.
"""
try:
image_metadata = registry.get_image_metadata(request.context,
image_id)
return utils.create_mashup_dict(image_metadata)
except exception.NotFound as e:
LOG.debug("No metadata found for image '%s'" % image_id)
raise webob.exc.HTTPNotFound(explanation=e.msg, request=request)
def _get_v2_image_metadata(self, request, image_id):
"""
Retrieves the image for the v2 api and creates an adapter-like
object to access image core or custom properties on the request.
"""
db_api = daisy.db.get_api()
image_repo = daisy.db.ImageRepo(request.context, db_api)
try:
image = image_repo.get(image_id)
# Storing image object in request as it is required in
# _process_v2_request call.
request.environ['api.cache.image'] = image
return policy.ImageTarget(image)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.msg, request=request)
def process_request(self, request):
"""
For requests for an image file, we check the local image
cache. If present, we return the image file, appending
the image metadata in headers. If not present, we pass
the request on to the next application in the pipeline.
"""
match = self._match_request(request)
try:
(version, method, image_id) = match
except TypeError:
# Trying to unpack None raises this exception
return None
self._stash_request_info(request, image_id, method, version)
if request.method != 'GET' or not self.cache.is_cached(image_id):
return None
method = getattr(self, '_get_%s_image_metadata' % version)
image_metadata = method(request, image_id)
# Deactivated images shall not be served from cache
if image_metadata['status'] == 'deactivated':
return None
try:
self._enforce(request, 'download_image', target=image_metadata)
except exception.Forbidden:
return None
LOG.debug("Cache hit for image '%s'", image_id)
image_iterator = self.get_from_cache(image_id)
method = getattr(self, '_process_%s_request' % version)
try:
return method(request, image_id, image_iterator, image_metadata)
except exception.NotFound:
msg = _LE("Image cache contained image file for image '%s', "
"however the registry did not contain metadata for "
"that image!") % image_id
LOG.error(msg)
self.cache.delete_cached_image(image_id)
@staticmethod
def _stash_request_info(request, image_id, method, version):
"""
Preserve the image id, version and request method for later retrieval
"""
request.environ['api.cache.image_id'] = image_id
request.environ['api.cache.method'] = method
request.environ['api.cache.version'] = version
@staticmethod
def _fetch_request_info(request):
"""
Fetch the image id, method and version stashed by process_request
for consumption by the process_response method of this middleware
"""
try:
image_id = request.environ['api.cache.image_id']
method = request.environ['api.cache.method']
version = request.environ['api.cache.version']
except KeyError:
return None
else:
return (image_id, method, version)
def _process_v1_request(self, request, image_id, image_iterator,
image_meta):
# Don't display location
if 'location' in image_meta:
del image_meta['location']
image_meta.pop('location_data', None)
self._verify_metadata(image_meta)
response = webob.Response(request=request)
raw_response = {
'image_iterator': image_iterator,
'image_meta': image_meta,
}
return self.serializer.show(response, raw_response)
def _process_v2_request(self, request, image_id, image_iterator,
image_meta):
# We do some contortions to get the image_metadata so
# that we can provide it to 'size_checked_iter' which
# will generate a notification.
# TODO(mclaren): Make notification happen more
# naturally once caching is part of the domain model.
image = request.environ['api.cache.image']
self._verify_metadata(image_meta)
response = webob.Response(request=request)
response.app_iter = size_checked_iter(response, image_meta,
image_meta['size'],
image_iterator,
notifier.Notifier())
# NOTE (flwang): Set the content-type, content-md5 and content-length
# explicitly to be consistent with the non-cache scenario.
# Besides, it's not worth the candle to invoke the "download" method
# of ResponseSerializer under image_data. Because method "download"
# will reset the app_iter. Then we have to call method
# "size_checked_iter" to avoid missing any notification. But after
# call "size_checked_iter", we will lose the content-md5 and
# content-length got by the method "download" because of this issue:
# https://github.com/Pylons/webob/issues/86
response.headers['Content-Type'] = 'application/octet-stream'
response.headers['Content-MD5'] = image.checksum
response.headers['Content-Length'] = str(image.size)
return response
def process_response(self, resp):
"""
We intercept the response coming back from the main
images Resource, removing image file from the cache
if necessary
"""
status_code = self.get_status_code(resp)
if not 200 <= status_code < 300:
return resp
try:
(image_id, method, version) = self._fetch_request_info(
resp.request)
except TypeError:
return resp
if method == 'GET' and status_code == 204:
# Bugfix:1251055 - Don't cache non-existent image files.
# NOTE: Both GET for an image without locations and DELETE return
# 204 but DELETE should be processed.
return resp
method_str = '_process_%s_response' % method
try:
process_response_method = getattr(self, method_str)
except AttributeError:
LOG.error(_LE('could not find %s') % method_str)
# Nothing to do here, move along
return resp
else:
return process_response_method(resp, image_id, version=version)
def _process_DELETE_response(self, resp, image_id, version=None):
if self.cache.is_cached(image_id):
LOG.debug("Removing image %s from cache", image_id)
self.cache.delete_cached_image(image_id)
return resp
def _process_GET_response(self, resp, image_id, version=None):
image_checksum = resp.headers.get('Content-MD5')
if not image_checksum:
# API V1 stores the checksum in a different header:
image_checksum = resp.headers.get('x-image-meta-checksum')
if not image_checksum:
LOG.error(_LE("Checksum header is missing."))
# fetch image_meta on the basis of version
image_metadata = None
if version:
method = getattr(self, '_get_%s_image_metadata' % version)
image_metadata = method(resp.request, image_id)
# NOTE(zhiyan): image_cache return a generator object and set to
# response.app_iter, it will be called by eventlet.wsgi later.
# So we need enforce policy firstly but do it by application
# since eventlet.wsgi could not catch webob.exc.HTTPForbidden and
# return 403 error to client then.
self._enforce(resp.request, 'download_image', target=image_metadata)
resp.app_iter = self.cache.get_caching_iter(image_id, image_checksum,
resp.app_iter)
return resp
def get_status_code(self, response):
"""
Returns the integer status code from the response, which
can be either a Webob.Response (used in testing) or httplib.Response
"""
if hasattr(response, 'status_int'):
return response.status_int
return response.status
def get_from_cache(self, image_id):
"""Called if cache hit"""
with self.cache.open_for_read(image_id) as cache_file:
chunks = utils.chunkiter(cache_file)
for chunk in chunks:
yield chunk
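
The PATTERNS table above drives _match_request; a quick standalone check of which paths count as cacheable (illustrative only):

import re

patterns = {
    ('v1', 'GET'): re.compile(r'^/v1/images/([^\/]+)$'),
    ('v2', 'GET'): re.compile(r'^/v2/images/([^\/]+)/file$'),
}

assert patterns[('v1', 'GET')].match('/v1/images/abc123').group(1) == 'abc123'
assert patterns[('v2', 'GET')].match('/v2/images/abc123/file').group(1) == 'abc123'
# '/v1/images/detail' also matches the regex; _match_request filters the
# literal id 'detail' out afterwards (see LP Bug #879136 above).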


@ -0,0 +1,85 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Image Cache Management API
"""
from oslo_log import log as logging
import routes
from daisy.api import cached_images
from daisy.common import wsgi
from daisy import i18n
LOG = logging.getLogger(__name__)
_LI = i18n._LI
class CacheManageFilter(wsgi.Middleware):
def __init__(self, app):
mapper = routes.Mapper()
resource = cached_images.create_resource()
mapper.connect("/v1/cached_images",
controller=resource,
action="get_cached_images",
conditions=dict(method=["GET"]))
mapper.connect("/v1/cached_images/{image_id}",
controller=resource,
action="delete_cached_image",
conditions=dict(method=["DELETE"]))
mapper.connect("/v1/cached_images",
controller=resource,
action="delete_cached_images",
conditions=dict(method=["DELETE"]))
mapper.connect("/v1/queued_images/{image_id}",
controller=resource,
action="queue_image",
conditions=dict(method=["PUT"]))
mapper.connect("/v1/queued_images",
controller=resource,
action="get_queued_images",
conditions=dict(method=["GET"]))
mapper.connect("/v1/queued_images/{image_id}",
controller=resource,
action="delete_queued_image",
conditions=dict(method=["DELETE"]))
mapper.connect("/v1/queued_images",
controller=resource,
action="delete_queued_images",
conditions=dict(method=["DELETE"]))
self._mapper = mapper
self._resource = resource
LOG.info(_LI("Initialized image cache management middleware"))
super(CacheManageFilter, self).__init__(app)
def process_request(self, request):
# Map request to our resource object if we can handle it
match = self._mapper.match(request.path_info, request.environ)
if match:
request.environ['wsgiorg.routing_args'] = (None, match)
return self._resource(request)
# Pass off downstream if we don't match the request path
else:
return None
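
The mapper built in __init__ can be exercised directly; a minimal sketch of how a path plus method resolves to a controller action (using the same routes API as above):

import routes

mapper = routes.Mapper()
mapper.connect('/v1/cached_images', action='get_cached_images',
               conditions=dict(method=['GET']))

# routes reads the request method from the environ to honour 'conditions'.
match = mapper.match('/v1/cached_images',
                     environ={'REQUEST_METHOD': 'GET'})
assert match['action'] == 'get_cached_images'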


@ -0,0 +1,137 @@
# Copyright 2011-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
from oslo_config import cfg
from oslo_log import log as logging
import webob.exc
from daisy.api import policy
from daisy.common import wsgi
import daisy.context
from daisy import i18n
_ = i18n._
context_opts = [
cfg.BoolOpt('owner_is_tenant', default=True,
help=_('When true, this option sets the owner of an image '
'to be the tenant. Otherwise, the owner of the '
'image will be the authenticated user issuing the '
'request.')),
cfg.StrOpt('admin_role', default='admin',
help=_('Role used to identify an authenticated user as '
'administrator.')),
cfg.BoolOpt('allow_anonymous_access', default=False,
help=_('Allow unauthenticated users to access the API with '
'read-only privileges. This only applies when using '
'ContextMiddleware.')),
]
CONF = cfg.CONF
CONF.register_opts(context_opts)
LOG = logging.getLogger(__name__)
class BaseContextMiddleware(wsgi.Middleware):
def process_response(self, resp):
try:
request_id = resp.request.context.request_id
except AttributeError:
LOG.warn(_('Unable to retrieve request id from context'))
else:
resp.headers['x-openstack-request-id'] = 'req-%s' % request_id
return resp
class ContextMiddleware(BaseContextMiddleware):
def __init__(self, app):
self.policy_enforcer = policy.Enforcer()
super(ContextMiddleware, self).__init__(app)
def process_request(self, req):
"""Convert authentication information into a request context
Generate a daisy.context.RequestContext object from the available
authentication headers and store on the 'context' attribute
of the req object.
:param req: wsgi request object that will be given the context object
:raises webob.exc.HTTPUnauthorized: when value of the X-Identity-Status
header is not 'Confirmed' and
anonymous access is disallowed
"""
if req.headers.get('X-Identity-Status') == 'Confirmed':
req.context = self._get_authenticated_context(req)
elif CONF.allow_anonymous_access:
req.context = self._get_anonymous_context()
else:
raise webob.exc.HTTPUnauthorized()
def _get_anonymous_context(self):
kwargs = {
'user': None,
'tenant': None,
'roles': [],
'is_admin': False,
'read_only': True,
'policy_enforcer': self.policy_enforcer,
}
return daisy.context.RequestContext(**kwargs)
def _get_authenticated_context(self, req):
# NOTE(bcwaldon): X-Roles is a csv string, but we need to parse
# it into a list to be useful
roles_header = req.headers.get('X-Roles', '')
roles = [r.strip().lower() for r in roles_header.split(',')]
# NOTE(bcwaldon): This header is deprecated in favor of X-Auth-Token
deprecated_token = req.headers.get('X-Storage-Token')
service_catalog = None
if req.headers.get('X-Service-Catalog') is not None:
try:
catalog_header = req.headers.get('X-Service-Catalog')
service_catalog = jsonutils.loads(catalog_header)
except ValueError:
raise webob.exc.HTTPInternalServerError(
_('Invalid service catalog json.'))
kwargs = {
'user': req.headers.get('X-User-Id'),
'tenant': req.headers.get('X-Tenant-Id'),
'roles': roles,
'is_admin': CONF.admin_role.strip().lower() in roles,
'auth_token': req.headers.get('X-Auth-Token', deprecated_token),
'owner_is_tenant': CONF.owner_is_tenant,
'service_catalog': service_catalog,
'policy_enforcer': self.policy_enforcer,
}
return daisy.context.RequestContext(**kwargs)
class UnauthenticatedContextMiddleware(BaseContextMiddleware):
def process_request(self, req):
"""Create a context without an authorized user."""
kwargs = {
'user': None,
'tenant': None,
'roles': [],
'is_admin': True,
}
req.context = daisy.context.RequestContext(**kwargs)
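
A hedged sketch of the header contract ContextMiddleware relies on, exercised with a bare webob request (header values are illustrative):

import webob

req = webob.Request.blank('/v1/images')
req.headers['X-Identity-Status'] = 'Confirmed'
req.headers['X-User-Id'] = 'u1'
req.headers['X-Tenant-Id'] = 't1'
req.headers['X-Roles'] = 'Member, admin'

# Same parsing as _get_authenticated_context() above.
roles = [r.strip().lower() for r in req.headers.get('X-Roles', '').split(',')]
assert 'admin' in roles  # matches the default CONF.admin_role check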


@ -0,0 +1,66 @@
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Use gzip compression if the client accepts it.
"""
import re
from oslo_log import log as logging
from daisy.common import wsgi
from daisy import i18n
LOG = logging.getLogger(__name__)
_LI = i18n._LI
class GzipMiddleware(wsgi.Middleware):
re_zip = re.compile(r'\bgzip\b')
def __init__(self, app):
LOG.info(_LI("Initialized gzip middleware"))
super(GzipMiddleware, self).__init__(app)
def process_response(self, response):
request = response.request
accept_encoding = request.headers.get('Accept-Encoding', '')
if self.re_zip.search(accept_encoding):
# NOTE(flaper87): Webob removes the content-md5 when
# app_iter is called. We'll keep it and reset it later
checksum = response.headers.get("Content-MD5")
# NOTE(flaper87): We'll use lazy for images so
# that they can be compressed without reading
# the whole content in memory. Notice that using
# lazy will set response's content-length to 0.
content_type = response.headers["Content-Type"]
lazy = content_type == "application/octet-stream"
# NOTE(flaper87): Webob takes care of the compression
# process, it will replace the body either with a
# compressed body or a generator - used for lazy com
# pression - depending on the lazy value.
#
# Webob itself will set the Content-Encoding header.
response.encode_content(lazy=lazy)
if checksum:
response.headers['Content-MD5'] = checksum
return response
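
webob's encode_content does the actual compression here; a small sketch of the eager (non-lazy) path described in the NOTEs above:

import webob

resp = webob.Response(body=b'x' * 1024)
resp.headers['Content-Type'] = 'text/plain'
resp.encode_content(lazy=False)  # eager: body is gzipped in memory
assert resp.headers['Content-Encoding'] == 'gzip'
assert int(resp.headers['Content-Length']) < 1024  # highly compressible body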


@ -0,0 +1,109 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A filter middleware that inspects the requested URI for a version string
and/or Accept headers and attempts to negotiate an API controller to
return
"""
from oslo_config import cfg
from oslo_log import log as logging
from daisy.api import versions
from daisy.common import wsgi
from daisy import i18n
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
_ = i18n._
_LW = i18n._LW
class VersionNegotiationFilter(wsgi.Middleware):
def __init__(self, app):
self.versions_app = versions.Controller()
super(VersionNegotiationFilter, self).__init__(app)
def process_request(self, req):
"""Try to find a version first in the accept header, then the URL"""
msg = _("Determining version of request: %(method)s %(path)s"
" Accept: %(accept)s")
args = {'method': req.method, 'path': req.path, 'accept': req.accept}
LOG.debug(msg % args)
# If the request is for /versions, just return the versions container
# TODO(bcwaldon): deprecate this behavior
if req.path_info_peek() == "versions":
return self.versions_app
accept = str(req.accept)
if accept.startswith('application/vnd.openstack.images-'):
LOG.debug("Using media-type versioning")
token_loc = len('application/vnd.openstack.images-')
req_version = accept[token_loc:]
else:
LOG.debug("Using url versioning")
# Remove version in url so it doesn't conflict later
req_version = self._pop_path_info(req)
try:
version = self._match_version_string(req_version)
except ValueError:
LOG.warn(_LW("Unknown version. Returning version choices."))
return self.versions_app
req.environ['api.version'] = version
req.path_info = ''.join(('/v', str(version), req.path_info))
LOG.debug("Matched version: v%d", version)
LOG.debug('new path %s', req.path_info)
return None
def _match_version_string(self, subject):
"""
Given a string, tries to match a major and/or
minor version number.
:param subject: The string to check
:returns version found in the subject
:raises ValueError if no acceptable version could be found
"""
if subject in ('v1', 'v1.0', 'v1.1') and CONF.enable_v1_api:
major_version = 1
elif subject in ('v2', 'v2.0', 'v2.1', 'v2.2') and CONF.enable_v2_api:
major_version = 2
else:
raise ValueError()
return major_version
def _pop_path_info(self, req):
"""
'Pops' off the next segment of PATH_INFO, returns the popped
segment. Do NOT push it onto SCRIPT_NAME.
"""
path = req.path_info
if not path:
return None
while path.startswith('/'):
path = path[1:]
idx = path.find('/')
if idx == -1:
idx = len(path)
r = path[:idx]
req.path_info = path[idx:]
return r
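
_pop_path_info strips the leading version segment from PATH_INFO; the same string handling can be shown without webob (a pure-string rework, names are assumptions):

def pop_path_info(path):
    # Mirror VersionNegotiationFilter._pop_path_info on a plain string:
    # return (popped segment, remaining path).
    while path.startswith('/'):
        path = path[1:]
    idx = path.find('/')
    if idx == -1:
        idx = len(path)
    return path[:idx], path[idx:]

assert pop_path_info('/v1/images/abc') == ('v1', '/images/abc')
assert pop_path_info('/v2.1/images') == ('v2.1', '/images')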


@ -0,0 +1,202 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Initialize network configuration for neutron
"""
import time
from oslo_log import log as logging
import daisy.registry.client.v1.api as registry
from webob.exc import HTTPBadRequest
from neutronclient.v2_0 import client as clientv20
from daisy.common import exception
LOG = logging.getLogger(__name__)
class network(object):
"""
network config
"""
def __init__(self, req, neutron_host, keystone_host, cluster_id):
registry.configure_registry_client()
auth_url = 'http://' + keystone_host + ':35357/v2.0'
end_url = 'http://' + neutron_host + ':9696'
params = {'username': 'admin',
'ca_cert': None,
'tenant_name': 'admin',
'insecure': False,
'auth_url': auth_url,
'timeout': 30,
'password': 'keystone',
'endpoint_url': end_url,
'auth_strategy': 'keystone'
}
self.cluster_id = cluster_id
self.neutron = clientv20.Client(**params)
try:
cluster = registry.get_cluster_metadata(req.context, cluster_id)
except exception.Invalid as e:
LOG.exception(e.msg)
raise HTTPBadRequest(explanation=e.msg, request=req)
LOG.info("<<<CLUSTER:%s,NEUTRON HOST:%s,KEYSTOEN:%s>>>", cluster, neutron_host, keystone_host)
if 'logic_networks' in cluster and cluster['logic_networks'] is not None:
self.nets = cluster['logic_networks']
#self._flat_network_uniqueness_check()
if 'routers' in cluster and cluster['routers'] is not None:
self.routers = cluster['routers']
else:
self.routers = []
self._network_check()
self.name_mappings = {}
self._network_config()
def _router_create(self, name):
body = {}
body['router'] = {"name": name, "admin_state_up": True}
router = self.neutron.create_router(body)
return router['router']['id']
def _subnet_create(self, net_id, **kwargs):
body = {}
body['subnet'] = {'enable_dhcp': True,
'network_id': net_id,
'ip_version': 4
}
for k in kwargs.keys():
body['subnet'][k] = kwargs[k]
LOG.info("<<<BODY:%s>>>", body)
subnet = self.neutron.create_subnet(body)
return subnet['subnet']['id']
def _router_link(self):
for router in self.routers:
router_id = self._router_create(router['name'])
if 'external_logic_network' in router:
body = {'network_id': self.name_mappings[router['external_logic_network']]}
self.neutron.add_gateway_router(router_id, body)
if 'subnets' in router:
for i in router['subnets']:
body = {'subnet_id': self.name_mappings[i]}
self.neutron.add_interface_router(router_id, body)
def _net_subnet_same_router_check(self, ex_network, subnet):
for router in self.routers:
if 'external_logic_network' in router and router['external_logic_network'] == ex_network:
if 'subnets' in router:
for i in router['subnets']:
if i == subnet:
return True
return False
def _subnet_check_and_create(self, net_id, subnet):
kwargs = {}
key_list = ['name', 'cidr', 'floating_ranges', 'dns_nameservers']
for i in key_list:
if i not in subnet:
raise exception.Invalid()
kwargs['name'] = subnet['name']
kwargs['cidr'] = subnet['cidr']
if len(subnet['dns_nameservers']) != 0:
kwargs['dns_nameservers'] = subnet['dns_nameservers']
kwargs['allocation_pools'] = []
if len(subnet['floating_ranges']) != 0:
for pool in subnet['floating_ranges']:
if len(pool) != 2:
raise exception.Invalid()
else:
alloc_pool = {}
alloc_pool['start'] = pool[0]
alloc_pool['end'] = pool[1]
kwargs['allocation_pools'].append(alloc_pool)
if 'gateway' in subnet and subnet['gateway'] is not None:
kwargs['gateway_ip'] = subnet['gateway']
subnet_id = self._subnet_create(net_id, **kwargs)
return subnet_id
def _network_check(self):
execute_times = 0
while True:
try:
nets = self.neutron.list_networks()
except Exception:
LOG.info("cannot connect to neutron server, sleeping 5s before retrying")
time.sleep(5)
execute_times += 1
if execute_times >= 60:
LOG.info("connect neutron server failed")
break
else:
LOG.info("connect neutron server sucessful")
if 'networks' in nets and len(nets['networks']) > 0:
raise exception.Invalid()
break
def _flat_network_uniqueness_check(self):
flat_mapping = []
for net in self.nets:
if net['physnet_name'] in flat_mapping:
raise exception.Invalid()
else:
if net['segmentation_type'].strip() == 'flat':
flat_mapping.append(net['physnet_name'])
def _network_config(self):
for net in self.nets:
body = {}
if net['type'] == 'external':
body['network'] = {'name': net['name'],
'router:external': True,
'provider:network_type': net['segmentation_type']}
if net['segmentation_type'].strip() == 'flat':
body['network']['provider:physical_network'] = net['physnet_name']
elif net['segmentation_type'].strip() == 'vxlan':
if 'segmentation_id' in net and net['segmentation_id'] is not None:
body['network']['provider:segmentation_id'] = net['segmentation_id']
else:
if 'segmentation_id' in net and net['segmentation_id'] is not None:
body['network']['provider:segmentation_id'] = net['segmentation_id']
body['network']['provider:physical_network'] = net['physnet_name']
if net['shared']:
body['network']['shared'] = True
else:
body['network']['shared'] = False
external = self.neutron.create_network(body)
self.name_mappings[net['name']] = external['network']['id']
last_create_subnet = []
for subnet in net['subnets']:
if self._net_subnet_same_router_check(net['name'], subnet['name']):
last_create_subnet.append(subnet)
else:
subnet_id = self._subnet_check_and_create(external['network']['id'], subnet)
self.name_mappings[subnet['name']] = subnet_id
for subnet in last_create_subnet:
subnet_id = self._subnet_check_and_create(external['network']['id'], subnet)
self.name_mappings[subnet['name']] = subnet_id
else:
body['network'] = {'name': net['name'],
'provider:network_type': net['segmentation_type']}
if net['segmentation_type'].strip() == 'vlan':
body['network']['provider:physical_network'] = net['physnet_name']
if 'segmentation_id' in net and net['segmentation_id'] is not None:
body['network']['provider:segmentation_id'] = net['segmentation_id']
if net['shared']:
body['network']['shared'] = True
else:
body['network']['shared'] = False
inner = self.neutron.create_network(body)
self.name_mappings[net['name']] = inner['network']['id']
for subnet in net['subnets']:
subnet_id = self._subnet_check_and_create(inner['network']['id'], subnet)
self.name_mappings[subnet['name']] = subnet_id
self._router_link()
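
The request bodies assembled in _network_config follow the standard neutronclient format; an illustrative example of one external flat network with a subnet (all field values are assumptions):

net_body = {'network': {'name': 'ext-net',
                        'router:external': True,
                        'provider:network_type': 'flat',
                        'provider:physical_network': 'physnet1',
                        'shared': False}}

subnet_body = {'subnet': {'enable_dhcp': True,
                          'network_id': '<net-id>',   # from create_network()
                          'ip_version': 4,
                          'name': 'ext-subnet',
                          'cidr': '10.0.0.0/24',
                          'allocation_pools': [{'start': '10.0.0.100',
                                                'end': '10.0.0.200'}],
                          'gateway_ip': '10.0.0.1'}}
# self.neutron.create_network(net_body); self.neutron.create_subnet(subnet_body)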

code/daisy/daisy/api/policy.py Executable file

@ -0,0 +1,699 @@
# Copyright (c) 2011 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Policy Engine For Glance"""
import copy
from oslo_config import cfg
from oslo_log import log as logging
from oslo_policy import policy
from daisy.common import exception
import daisy.domain.proxy
from daisy import i18n
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
DEFAULT_RULES = policy.Rules.from_dict({
'context_is_admin': 'role:admin',
'default': '@',
'manage_image_cache': 'role:admin',
})
_ = i18n._
_LI = i18n._LI
_LW = i18n._LW
class Enforcer(policy.Enforcer):
"""Responsible for loading and enforcing rules"""
def __init__(self):
if CONF.find_file(CONF.oslo_policy.policy_file):
kwargs = dict(rules=None, use_conf=True)
else:
kwargs = dict(rules=DEFAULT_RULES, use_conf=False)
super(Enforcer, self).__init__(CONF, overwrite=False, **kwargs)
def add_rules(self, rules):
"""Add new rules to the Rules object"""
self.set_rules(rules, overwrite=False, use_conf=self.use_conf)
def enforce(self, context, action, target):
"""Verifies that the action is valid on the target in this context.
:param context: Glance request context
:param action: String representing the action to be checked
:param target: Dictionary representing the object of the action.
:raises: `daisy.common.exception.Forbidden`
:returns: A non-False value if access is allowed.
"""
credentials = {
'roles': context.roles,
'user': context.user,
'tenant': context.tenant,
}
return super(Enforcer, self).enforce(action, target, credentials,
do_raise=True,
exc=exception.Forbidden,
action=action)
def check(self, context, action, target):
"""Verifies that the action is valid on the target in this context.
:param context: Glance request context
:param action: String representing the action to be checked
:param target: Dictionary representing the object of the action.
:returns: A non-False value if access is allowed.
"""
credentials = {
'roles': context.roles,
'user': context.user,
'tenant': context.tenant,
}
return super(Enforcer, self).enforce(action, target, credentials)
def check_is_admin(self, context):
"""Check if the given context is associated with an admin role,
as defined via the 'context_is_admin' RBAC rule.
:param context: Glance request context
:returns: A non-False value if context role is admin.
"""
return self.check(context, 'context_is_admin', context.to_dict())
class ImageRepoProxy(daisy.domain.proxy.Repo):
def __init__(self, image_repo, context, policy):
self.context = context
self.policy = policy
self.image_repo = image_repo
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(ImageRepoProxy, self).__init__(image_repo,
item_proxy_class=ImageProxy,
item_proxy_kwargs=proxy_kwargs)
def get(self, image_id):
try:
image = super(ImageRepoProxy, self).get(image_id)
except exception.NotFound:
self.policy.enforce(self.context, 'get_image', {})
raise
else:
self.policy.enforce(self.context, 'get_image', ImageTarget(image))
return image
def list(self, *args, **kwargs):
self.policy.enforce(self.context, 'get_images', {})
return super(ImageRepoProxy, self).list(*args, **kwargs)
def save(self, image, from_state=None):
self.policy.enforce(self.context, 'modify_image', image.target)
return super(ImageRepoProxy, self).save(image, from_state=from_state)
def add(self, image):
self.policy.enforce(self.context, 'add_image', image.target)
return super(ImageRepoProxy, self).add(image)
class ImageProxy(daisy.domain.proxy.Image):
def __init__(self, image, context, policy):
self.image = image
self.target = ImageTarget(image)
self.context = context
self.policy = policy
super(ImageProxy, self).__init__(image)
@property
def visibility(self):
return self.image.visibility
@visibility.setter
def visibility(self, value):
if value == 'public':
self.policy.enforce(self.context, 'publicize_image', self.target)
self.image.visibility = value
@property
def locations(self):
return ImageLocationsProxy(self.image.locations,
self.context, self.policy)
@locations.setter
def locations(self, value):
if not isinstance(value, (list, ImageLocationsProxy)):
raise exception.Invalid(_('Invalid locations: %s') % value)
self.policy.enforce(self.context, 'set_image_location', self.target)
new_locations = list(value)
if (set([loc['url'] for loc in self.image.locations]) -
set([loc['url'] for loc in new_locations])):
self.policy.enforce(self.context, 'delete_image_location',
self.target)
self.image.locations = new_locations
def delete(self):
self.policy.enforce(self.context, 'delete_image', self.target)
return self.image.delete()
def deactivate(self):
LOG.debug('Attempting deactivate')
target = ImageTarget(self.image)
self.policy.enforce(self.context, 'deactivate', target=target)
LOG.debug('Deactivate allowed, continue')
self.image.deactivate()
def reactivate(self):
LOG.debug('Attempting reactivate')
target = ImageTarget(self.image)
self.policy.enforce(self.context, 'reactivate', target=target)
LOG.debug('Reactivate allowed, continue')
self.image.reactivate()
def get_data(self, *args, **kwargs):
self.policy.enforce(self.context, 'download_image', self.target)
return self.image.get_data(*args, **kwargs)
def set_data(self, *args, **kwargs):
self.policy.enforce(self.context, 'upload_image', self.target)
return self.image.set_data(*args, **kwargs)
def get_member_repo(self, **kwargs):
member_repo = self.image.get_member_repo(**kwargs)
return ImageMemberRepoProxy(member_repo, self.context, self.policy)
class ImageFactoryProxy(daisy.domain.proxy.ImageFactory):
def __init__(self, image_factory, context, policy):
self.image_factory = image_factory
self.context = context
self.policy = policy
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(ImageFactoryProxy, self).__init__(image_factory,
proxy_class=ImageProxy,
proxy_kwargs=proxy_kwargs)
def new_image(self, **kwargs):
if kwargs.get('visibility') == 'public':
self.policy.enforce(self.context, 'publicize_image', {})
return super(ImageFactoryProxy, self).new_image(**kwargs)
class ImageMemberFactoryProxy(daisy.domain.proxy.ImageMembershipFactory):
def __init__(self, member_factory, context, policy):
super(ImageMemberFactoryProxy, self).__init__(
member_factory,
image_proxy_class=ImageProxy,
image_proxy_kwargs={'context': context, 'policy': policy})
class ImageMemberRepoProxy(daisy.domain.proxy.Repo):
def __init__(self, member_repo, context, policy):
self.member_repo = member_repo
self.target = ImageTarget(self.member_repo.image)
self.context = context
self.policy = policy
def add(self, member):
self.policy.enforce(self.context, 'add_member', self.target)
self.member_repo.add(member)
def get(self, member_id):
self.policy.enforce(self.context, 'get_member', self.target)
return self.member_repo.get(member_id)
def save(self, member, from_state=None):
self.policy.enforce(self.context, 'modify_member', self.target)
self.member_repo.save(member, from_state=from_state)
def list(self, *args, **kwargs):
self.policy.enforce(self.context, 'get_members', self.target)
return self.member_repo.list(*args, **kwargs)
def remove(self, member):
self.policy.enforce(self.context, 'delete_member', self.target)
self.member_repo.remove(member)
class ImageLocationsProxy(object):
__hash__ = None
def __init__(self, locations, context, policy):
self.locations = locations
self.context = context
self.policy = policy
def __copy__(self):
return type(self)(self.locations, self.context, self.policy)
def __deepcopy__(self, memo):
# NOTE(zhiyan): Only copy location entries, others can be reused.
return type(self)(copy.deepcopy(self.locations, memo),
self.context, self.policy)
def _get_checker(action, func_name):
def _checker(self, *args, **kwargs):
self.policy.enforce(self.context, action, {})
method = getattr(self.locations, func_name)
return method(*args, **kwargs)
return _checker
count = _get_checker('get_image_location', 'count')
index = _get_checker('get_image_location', 'index')
__getitem__ = _get_checker('get_image_location', '__getitem__')
__contains__ = _get_checker('get_image_location', '__contains__')
__len__ = _get_checker('get_image_location', '__len__')
__cast = _get_checker('get_image_location', '__cast')
__cmp__ = _get_checker('get_image_location', '__cmp__')
__iter__ = _get_checker('get_image_location', '__iter__')
append = _get_checker('set_image_location', 'append')
extend = _get_checker('set_image_location', 'extend')
insert = _get_checker('set_image_location', 'insert')
reverse = _get_checker('set_image_location', 'reverse')
__iadd__ = _get_checker('set_image_location', '__iadd__')
__setitem__ = _get_checker('set_image_location', '__setitem__')
pop = _get_checker('delete_image_location', 'pop')
remove = _get_checker('delete_image_location', 'remove')
__delitem__ = _get_checker('delete_image_location', '__delitem__')
__delslice__ = _get_checker('delete_image_location', '__delslice__')
del _get_checker
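# NOTE: the block above builds list-like methods dynamically: each name is
# bound to a closure that enforces a policy action and then delegates to the
# wrapped locations object. A minimal standalone sketch of the same pattern
# (illustrative names only, not part of this module):
#
#     class GuardedList(object):
#         def __init__(self, wrapped, context, policy):
#             self.wrapped = wrapped
#             self.context = context
#             self.policy = policy
#
#         def _guarded(action, func_name):
#             def _method(self, *args, **kwargs):
#                 self.policy.enforce(self.context, action, {})
#                 return getattr(self.wrapped, func_name)(*args, **kwargs)
#             return _method
#
#         append = _guarded('set_image_location', 'append')
#         pop = _guarded('delete_image_location', 'pop')
#         del _guarded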
class TaskProxy(daisy.domain.proxy.Task):
def __init__(self, task, context, policy):
self.task = task
self.context = context
self.policy = policy
super(TaskProxy, self).__init__(task)
class TaskStubProxy(daisy.domain.proxy.TaskStub):
def __init__(self, task_stub, context, policy):
self.task_stub = task_stub
self.context = context
self.policy = policy
super(TaskStubProxy, self).__init__(task_stub)
class TaskRepoProxy(daisy.domain.proxy.TaskRepo):
def __init__(self, task_repo, context, task_policy):
self.context = context
self.policy = task_policy
self.task_repo = task_repo
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(TaskRepoProxy,
self).__init__(task_repo,
task_proxy_class=TaskProxy,
task_proxy_kwargs=proxy_kwargs)
def get(self, task_id):
self.policy.enforce(self.context, 'get_task', {})
return super(TaskRepoProxy, self).get(task_id)
def add(self, task):
self.policy.enforce(self.context, 'add_task', {})
super(TaskRepoProxy, self).add(task)
def save(self, task):
self.policy.enforce(self.context, 'modify_task', {})
super(TaskRepoProxy, self).save(task)
class TaskStubRepoProxy(daisy.domain.proxy.TaskStubRepo):
def __init__(self, task_stub_repo, context, task_policy):
self.context = context
self.policy = task_policy
self.task_stub_repo = task_stub_repo
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(TaskStubRepoProxy,
self).__init__(task_stub_repo,
task_stub_proxy_class=TaskStubProxy,
task_stub_proxy_kwargs=proxy_kwargs)
def list(self, *args, **kwargs):
self.policy.enforce(self.context, 'get_tasks', {})
return super(TaskStubRepoProxy, self).list(*args, **kwargs)
class TaskFactoryProxy(daisy.domain.proxy.TaskFactory):
def __init__(self, task_factory, context, policy):
self.task_factory = task_factory
self.context = context
self.policy = policy
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(TaskFactoryProxy, self).__init__(
task_factory,
task_proxy_class=TaskProxy,
task_proxy_kwargs=proxy_kwargs)
class ImageTarget(object):
SENTINEL = object()
def __init__(self, target):
"""Initialize the object
        :param target: Object being targeted
"""
self.target = target
def __getitem__(self, key):
"""Return the value of 'key' from the target.
If the target has the attribute 'key', return it.
:param key: value to retrieve
"""
key = self.key_transforms(key)
value = getattr(self.target, key, self.SENTINEL)
if value is self.SENTINEL:
extra_properties = getattr(self.target, 'extra_properties', None)
if extra_properties is not None:
value = extra_properties[key]
else:
value = None
return value
def key_transforms(self, key):
if key == 'id':
key = 'image_id'
return key
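# Example (illustrative): given an image object with attributes image_id,
# owner and a dict-like extra_properties, ImageTarget exposes them through
# one mapping interface for policy checks:
#
#     target = ImageTarget(image)
#     target['id']     # -> image.image_id (key_transforms maps 'id')
#     target['owner']  # -> image.owner, or extra_properties['owner']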
# Metadef Namespace classes
class MetadefNamespaceProxy(daisy.domain.proxy.MetadefNamespace):
def __init__(self, namespace, context, policy):
self.namespace_input = namespace
self.context = context
self.policy = policy
super(MetadefNamespaceProxy, self).__init__(namespace)
class MetadefNamespaceRepoProxy(daisy.domain.proxy.MetadefNamespaceRepo):
def __init__(self, namespace_repo, context, namespace_policy):
self.context = context
self.policy = namespace_policy
self.namespace_repo = namespace_repo
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(MetadefNamespaceRepoProxy,
self).__init__(namespace_repo,
namespace_proxy_class=MetadefNamespaceProxy,
namespace_proxy_kwargs=proxy_kwargs)
def get(self, namespace):
self.policy.enforce(self.context, 'get_metadef_namespace', {})
return super(MetadefNamespaceRepoProxy, self).get(namespace)
def list(self, *args, **kwargs):
self.policy.enforce(self.context, 'get_metadef_namespaces', {})
return super(MetadefNamespaceRepoProxy, self).list(*args, **kwargs)
def save(self, namespace):
self.policy.enforce(self.context, 'modify_metadef_namespace', {})
return super(MetadefNamespaceRepoProxy, self).save(namespace)
def add(self, namespace):
self.policy.enforce(self.context, 'add_metadef_namespace', {})
return super(MetadefNamespaceRepoProxy, self).add(namespace)
class MetadefNamespaceFactoryProxy(
daisy.domain.proxy.MetadefNamespaceFactory):
def __init__(self, meta_namespace_factory, context, policy):
self.meta_namespace_factory = meta_namespace_factory
self.context = context
self.policy = policy
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(MetadefNamespaceFactoryProxy, self).__init__(
meta_namespace_factory,
meta_namespace_proxy_class=MetadefNamespaceProxy,
meta_namespace_proxy_kwargs=proxy_kwargs)
# Metadef Object classes
class MetadefObjectProxy(daisy.domain.proxy.MetadefObject):
def __init__(self, meta_object, context, policy):
self.meta_object = meta_object
self.context = context
self.policy = policy
super(MetadefObjectProxy, self).__init__(meta_object)
class MetadefObjectRepoProxy(daisy.domain.proxy.MetadefObjectRepo):
def __init__(self, object_repo, context, object_policy):
self.context = context
self.policy = object_policy
self.object_repo = object_repo
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(MetadefObjectRepoProxy,
self).__init__(object_repo,
object_proxy_class=MetadefObjectProxy,
object_proxy_kwargs=proxy_kwargs)
def get(self, namespace, object_name):
self.policy.enforce(self.context, 'get_metadef_object', {})
return super(MetadefObjectRepoProxy, self).get(namespace, object_name)
def list(self, *args, **kwargs):
self.policy.enforce(self.context, 'get_metadef_objects', {})
return super(MetadefObjectRepoProxy, self).list(*args, **kwargs)
def save(self, meta_object):
self.policy.enforce(self.context, 'modify_metadef_object', {})
return super(MetadefObjectRepoProxy, self).save(meta_object)
def add(self, meta_object):
self.policy.enforce(self.context, 'add_metadef_object', {})
return super(MetadefObjectRepoProxy, self).add(meta_object)
class MetadefObjectFactoryProxy(daisy.domain.proxy.MetadefObjectFactory):
def __init__(self, meta_object_factory, context, policy):
self.meta_object_factory = meta_object_factory
self.context = context
self.policy = policy
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(MetadefObjectFactoryProxy, self).__init__(
meta_object_factory,
meta_object_proxy_class=MetadefObjectProxy,
meta_object_proxy_kwargs=proxy_kwargs)
# Metadef ResourceType classes
class MetadefResourceTypeProxy(daisy.domain.proxy.MetadefResourceType):
def __init__(self, meta_resource_type, context, policy):
self.meta_resource_type = meta_resource_type
self.context = context
self.policy = policy
super(MetadefResourceTypeProxy, self).__init__(meta_resource_type)
class MetadefResourceTypeRepoProxy(
daisy.domain.proxy.MetadefResourceTypeRepo):
def __init__(self, resource_type_repo, context, resource_type_policy):
self.context = context
self.policy = resource_type_policy
self.resource_type_repo = resource_type_repo
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(MetadefResourceTypeRepoProxy, self).__init__(
resource_type_repo,
resource_type_proxy_class=MetadefResourceTypeProxy,
resource_type_proxy_kwargs=proxy_kwargs)
def list(self, *args, **kwargs):
self.policy.enforce(self.context, 'list_metadef_resource_types', {})
return super(MetadefResourceTypeRepoProxy, self).list(*args, **kwargs)
def get(self, *args, **kwargs):
self.policy.enforce(self.context, 'get_metadef_resource_type', {})
return super(MetadefResourceTypeRepoProxy, self).get(*args, **kwargs)
def add(self, resource_type):
self.policy.enforce(self.context,
'add_metadef_resource_type_association', {})
return super(MetadefResourceTypeRepoProxy, self).add(resource_type)
class MetadefResourceTypeFactoryProxy(
daisy.domain.proxy.MetadefResourceTypeFactory):
def __init__(self, resource_type_factory, context, policy):
self.resource_type_factory = resource_type_factory
self.context = context
self.policy = policy
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(MetadefResourceTypeFactoryProxy, self).__init__(
resource_type_factory,
resource_type_proxy_class=MetadefResourceTypeProxy,
resource_type_proxy_kwargs=proxy_kwargs)
# Metadef namespace properties classes
class MetadefPropertyProxy(daisy.domain.proxy.MetadefProperty):
def __init__(self, namespace_property, context, policy):
self.namespace_property = namespace_property
self.context = context
self.policy = policy
super(MetadefPropertyProxy, self).__init__(namespace_property)
class MetadefPropertyRepoProxy(daisy.domain.proxy.MetadefPropertyRepo):
def __init__(self, property_repo, context, object_policy):
self.context = context
self.policy = object_policy
self.property_repo = property_repo
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(MetadefPropertyRepoProxy, self).__init__(
property_repo,
property_proxy_class=MetadefPropertyProxy,
property_proxy_kwargs=proxy_kwargs)
def get(self, namespace, property_name):
self.policy.enforce(self.context, 'get_metadef_property', {})
return super(MetadefPropertyRepoProxy, self).get(namespace,
property_name)
def list(self, *args, **kwargs):
self.policy.enforce(self.context, 'get_metadef_properties', {})
return super(MetadefPropertyRepoProxy, self).list(
*args, **kwargs)
def save(self, namespace_property):
self.policy.enforce(self.context, 'modify_metadef_property', {})
return super(MetadefPropertyRepoProxy, self).save(
namespace_property)
def add(self, namespace_property):
self.policy.enforce(self.context, 'add_metadef_property', {})
return super(MetadefPropertyRepoProxy, self).add(
namespace_property)
class MetadefPropertyFactoryProxy(daisy.domain.proxy.MetadefPropertyFactory):
def __init__(self, namespace_property_factory, context, policy):
self.namespace_property_factory = namespace_property_factory
self.context = context
self.policy = policy
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(MetadefPropertyFactoryProxy, self).__init__(
namespace_property_factory,
property_proxy_class=MetadefPropertyProxy,
property_proxy_kwargs=proxy_kwargs)
# Metadef Tag classes
class MetadefTagProxy(daisy.domain.proxy.MetadefTag):
def __init__(self, meta_tag, context, policy):
self.context = context
self.policy = policy
super(MetadefTagProxy, self).__init__(meta_tag)
class MetadefTagRepoProxy(daisy.domain.proxy.MetadefTagRepo):
def __init__(self, tag_repo, context, tag_policy):
self.context = context
self.policy = tag_policy
self.tag_repo = tag_repo
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(MetadefTagRepoProxy,
self).__init__(tag_repo,
tag_proxy_class=MetadefTagProxy,
tag_proxy_kwargs=proxy_kwargs)
def get(self, namespace, tag_name):
self.policy.enforce(self.context, 'get_metadef_tag', {})
return super(MetadefTagRepoProxy, self).get(namespace, tag_name)
def list(self, *args, **kwargs):
self.policy.enforce(self.context, 'get_metadef_tags', {})
return super(MetadefTagRepoProxy, self).list(*args, **kwargs)
def save(self, meta_tag):
self.policy.enforce(self.context, 'modify_metadef_tag', {})
return super(MetadefTagRepoProxy, self).save(meta_tag)
def add(self, meta_tag):
self.policy.enforce(self.context, 'add_metadef_tag', {})
return super(MetadefTagRepoProxy, self).add(meta_tag)
def add_tags(self, meta_tags):
self.policy.enforce(self.context, 'add_metadef_tags', {})
return super(MetadefTagRepoProxy, self).add_tags(meta_tags)
class MetadefTagFactoryProxy(daisy.domain.proxy.MetadefTagFactory):
def __init__(self, meta_tag_factory, context, policy):
self.meta_tag_factory = meta_tag_factory
self.context = context
self.policy = policy
proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(MetadefTagFactoryProxy, self).__init__(
meta_tag_factory,
meta_tag_proxy_class=MetadefTagProxy,
meta_tag_proxy_kwargs=proxy_kwargs)
# Catalog Search classes
class CatalogSearchRepoProxy(object):
def __init__(self, search_repo, context, search_policy):
self.context = context
self.policy = search_policy
self.search_repo = search_repo
def search(self, *args, **kwargs):
self.policy.enforce(self.context, 'catalog_search', {})
return self.search_repo.search(*args, **kwargs)
def plugins_info(self, *args, **kwargs):
self.policy.enforce(self.context, 'catalog_plugins', {})
return self.search_repo.plugins_info(*args, **kwargs)
def index(self, *args, **kwargs):
self.policy.enforce(self.context, 'catalog_index', {})
return self.search_repo.index(*args, **kwargs)
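# A minimal usage sketch (assumed wiring; in a real deployment these proxies
# are composed by the gateway layer): wrap a raw repo so that every call is
# policy-checked before it reaches the data layer.
#
#     class AllowAllEnforcer(object):
#         def enforce(self, context, action, target):
#             return True  # a real Enforcer raises exception.Forbidden
#
#     task_repo = TaskRepoProxy(raw_task_repo, request_context,
#                               AllowAllEnforcer())
#     task = task_repo.get(task_id)  # enforces 'get_task', then delegates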


@ -0,0 +1,126 @@
# Copyright 2013 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from daisy.common import exception
import daisy.domain.proxy
class ProtectedImageFactoryProxy(daisy.domain.proxy.ImageFactory):
def __init__(self, image_factory, context, property_rules):
self.image_factory = image_factory
self.context = context
self.property_rules = property_rules
kwargs = {'context': self.context,
'property_rules': self.property_rules}
super(ProtectedImageFactoryProxy, self).__init__(
image_factory,
proxy_class=ProtectedImageProxy,
proxy_kwargs=kwargs)
def new_image(self, **kwargs):
extra_props = kwargs.pop('extra_properties', {})
extra_properties = {}
for key in extra_props.keys():
if self.property_rules.check_property_rules(key, 'create',
self.context):
extra_properties[key] = extra_props[key]
else:
raise exception.ReservedProperty(property=key)
return super(ProtectedImageFactoryProxy, self).new_image(
extra_properties=extra_properties, **kwargs)
class ProtectedImageRepoProxy(daisy.domain.proxy.Repo):
def __init__(self, image_repo, context, property_rules):
self.context = context
self.image_repo = image_repo
self.property_rules = property_rules
proxy_kwargs = {'context': self.context}
super(ProtectedImageRepoProxy, self).__init__(
image_repo, item_proxy_class=ProtectedImageProxy,
item_proxy_kwargs=proxy_kwargs)
def get(self, image_id):
return ProtectedImageProxy(self.image_repo.get(image_id),
self.context, self.property_rules)
def list(self, *args, **kwargs):
images = self.image_repo.list(*args, **kwargs)
return [ProtectedImageProxy(image, self.context, self.property_rules)
for image in images]
class ProtectedImageProxy(daisy.domain.proxy.Image):
def __init__(self, image, context, property_rules):
self.image = image
self.context = context
self.property_rules = property_rules
self.image.extra_properties = ExtraPropertiesProxy(
self.context,
self.image.extra_properties,
self.property_rules)
super(ProtectedImageProxy, self).__init__(self.image)
class ExtraPropertiesProxy(daisy.domain.ExtraProperties):
def __init__(self, context, extra_props, property_rules):
self.context = context
self.property_rules = property_rules
extra_properties = {}
for key in extra_props.keys():
if self.property_rules.check_property_rules(key, 'read',
self.context):
extra_properties[key] = extra_props[key]
super(ExtraPropertiesProxy, self).__init__(extra_properties)
def __getitem__(self, key):
if self.property_rules.check_property_rules(key, 'read', self.context):
return dict.__getitem__(self, key)
else:
raise KeyError
def __setitem__(self, key, value):
        # NOTE(isethi): Exceptions are raised only for actions update, delete
        # and create, where the user proactively interacts with the properties.
        # A user cannot request to read a specific property, hence reads do
        # not raise an exception.
try:
if self.__getitem__(key) is not None:
if self.property_rules.check_property_rules(key, 'update',
self.context):
return dict.__setitem__(self, key, value)
else:
raise exception.ReservedProperty(property=key)
except KeyError:
if self.property_rules.check_property_rules(key, 'create',
self.context):
return dict.__setitem__(self, key, value)
else:
raise exception.ReservedProperty(property=key)
def __delitem__(self, key):
if key not in super(ExtraPropertiesProxy, self).keys():
raise KeyError
if self.property_rules.check_property_rules(key, 'delete',
self.context):
return dict.__delitem__(self, key)
else:
raise exception.ReservedProperty(property=key)
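# A minimal sketch of the protection flow with a stub rules object (the real
# one comes from daisy.common.property_utils.PropertyRules):
#
#     class DenyAllRules(object):
#         def check_property_rules(self, key, action, context):
#             return False
#
#     props = ExtraPropertiesProxy(ctxt, {'x_owner': 'a'}, DenyAllRules())
#     len(props)              # == 0: unreadable keys were filtered out
#     props['x_owner'] = 'b'  # raises exception.ReservedProperty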


@ -0,0 +1,23 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
SUPPORTED_FILTERS = ['name', 'status', 'cluster_id', 'id', 'host_id',
                     'role_id', 'auto_scale', 'container_format',
                     'disk_format', 'min_ram', 'min_disk', 'size_min',
                     'size_max', 'is_public', 'changes-since', 'protected']
SUPPORTED_PARAMS = ('limit', 'marker', 'sort_key', 'sort_dir')
# Metadata which only an admin can change once the image is active
ACTIVE_IMMUTABLE = ('size', 'checksum')
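# Example (illustrative): for a request such as
#   GET /clusters?name=foo&auto_scale=1&limit=10&sort_key=name
# 'name' and 'auto_scale' match SUPPORTED_FILTERS and are passed to the
# registry as filters, while 'limit' and 'sort_key' match SUPPORTED_PARAMS
# and control paging/sorting (see _get_filters/_get_query_params in the
# v1 controllers below).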


@ -0,0 +1,742 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/clusters endpoint for Daisy v1 API
"""
import copy
from oslo_config import cfg
from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPConflict
from webob.exc import HTTPForbidden
from webob.exc import HTTPNotFound
from webob.exc import HTTPServerError
from webob import Response
from daisy.api import policy
import daisy.api.v1
from daisy.api.v1 import controller
from daisy.api.v1 import filters
from daisy.common import exception
from daisy.common import property_utils
from daisy.common import utils
from daisy.common import wsgi
from daisy import i18n
from daisy import notifier
import daisy.registry.client.v1.api as registry
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS
SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS
ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE
CONF = cfg.CONF
CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format')
CONF.import_opt('container_formats', 'daisy.common.config',
group='image_format')
CONF.import_opt('image_property_quota', 'daisy.common.config')
CLUSTER_DEFAULT_NETWORKS = ['PUBLIC', 'DEPLOYMENT', 'PRIVATE', 'EXTERNAL',
'STORAGE', 'VXLAN', 'MANAGEMENT']
class Controller(controller.BaseController):
"""
WSGI controller for clusters resource in Daisy v1 API
The clusters resource API is a RESTful web service for cluster data. The API
is as follows::
GET /clusters -- Returns a set of brief metadata about clusters
    GET /clusters/detail -- Returns a set of detailed metadata about
                  clusters
    HEAD /clusters/<ID> -- Return metadata about a cluster with id <ID>
GET /clusters/<ID> -- Return cluster data for cluster with id <ID>
POST /clusters -- Store cluster data and return metadata about the
newly-stored cluster
PUT /clusters/<ID> -- Update cluster metadata and/or upload cluster
data for a previously-reserved cluster
DELETE /clusters/<ID> -- Delete the cluster with id <ID>
"""
def check_params(f):
"""
Cluster add and update operation params valid check.
        :param f: Function handle for 'cluster_add' and 'cluster_update'.
:return: f
"""
def wrapper(*args, **kwargs):
controller, req = args
cluster_meta = kwargs.get('cluster_meta', None)
cluster_id = kwargs.get('id', None)
errmsg = (_("I'm params checker."))
LOG.debug(_("Params check for cluster-add or cluster-update begin!"))
            def check_params_range(param, range_type=None):
                '''
                param: a list such as [start, end]
                check condition: start must be less than end, and the two
                values must exist as a pair
                return True, or raise HTTPForbidden on invalid input
                '''
                if len(param) != 2:
                    msg = '%s range must exist in pairs.' % range_type
                    raise HTTPForbidden(explanation=msg)
                if param[0] is None or param[0] == '':
                    msg = 'The start value of %s range can not be None.' % range_type
                    raise HTTPForbidden(explanation=msg)
                if param[1] is None:
                    msg = 'The end value of %s range can not be None.' % range_type
                    raise HTTPForbidden(explanation=msg)
                if int(param[0]) > int(param[1]):
                    msg = ('The start value of the %s range must be less '
                           'than the end value.' % range_type)
                    raise HTTPForbidden(explanation=msg)
                if range_type not in ['vni']:
                    if int(param[0]) < 0 or int(param[0]) > 4096:
                        msg = ('Invalid start value (%s) of the %s range.'
                               % (param[0], range_type))
                        raise HTTPForbidden(explanation=msg)
                    if int(param[1]) < 0 or int(param[1]) > 4096:
                        msg = ('Invalid end value (%s) of the %s range.'
                               % (param[1], range_type))
                        raise HTTPForbidden(explanation=msg)
                else:
                    if int(param[0]) < 0 or int(param[0]) > 16777216:
                        msg = ('Invalid start value (%s) of the %s range.'
                               % (param[0], range_type))
                        raise HTTPForbidden(explanation=msg)
                    if int(param[1]) < 0 or int(param[1]) > 16777216:
                        msg = ('Invalid end value (%s) of the %s range.'
                               % (param[1], range_type))
                        raise HTTPForbidden(explanation=msg)
                return True
            def _check_auto_scale(req, cluster_meta):
                if 'auto_scale' in cluster_meta and \
                        cluster_meta['auto_scale'] == '1':
                    meta = {"auto_scale": '1'}
                    params = {'filters': meta}
                    clusters = registry.get_clusters_detail(req.context, **params)
                    if clusters:
                        if cluster_id:
                            temp_cluster = [cluster for cluster in clusters
                                            if cluster['id'] != cluster_id]
                            if temp_cluster:
                                errmsg = (_("A cluster with auto_scale "
                                            "enabled already exists"))
                                raise HTTPBadRequest(explanation=errmsg)
                        else:
                            errmsg = (_("A cluster with auto_scale enabled "
                                        "already exists"))
                            raise HTTPBadRequest(explanation=errmsg)
            def _ip_into_int(ip):
                """
                Convert an IP string to a decimal integer.
                :param ip: ip string
                :return: decimal integer
                """
                return reduce(lambda x, y: (x << 8) + y, map(int, ip.split('.')))
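                # Example: _ip_into_int('10.0.0.1') folds each octet into the
                # accumulator: ((10 * 256 + 0) * 256 + 0) * 256 + 1 == 167772161.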
            def _is_in_network_range(ip, network):
                """
                Check whether an ip is in the network range.
                :param ip: Ip to be checked, e.g. 192.168.1.2.
                :param network: Ip range in CIDR form, e.g. 192.168.0.0/24.
                :return: True if the ip is in the range, else False.
                """
                network = network.split('/')
                mask = ~(2 ** (32 - int(network[1])) - 1)
                return (_ip_into_int(ip) & mask) == (_ip_into_int(network[0]) & mask)
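                # Example: for network '192.168.0.0/16' the mask keeps the top
                # 16 bits, so '192.168.1.2' is in the range while
                # '192.169.0.1' is not.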
            def _check_param_nonull_and_valid(values_set, keys_set, valids_set={}):
                """
                Check that operation params are not null and are valid.
                :param values_set: Params set.
                :param keys_set: Params to be checked.
                :param valids_set: Map of param name to its valid values.
                :return:
                """
                for k in keys_set:
                    v = values_set.get(k, None)
                    # Booleans only fail the check when missing; any other
                    # empty/falsy value is rejected as well.
                    if v is None or (not isinstance(v, bool) and not v):
                        errmsg = (_("Segment %s can't be None." % k))
                        raise HTTPBadRequest(explanation=errmsg)
                for (k, v) in valids_set.items():
                    if values_set.get(k, None) and -1 == v.find(values_set[k]):
                        errmsg = (_("Segment %s is out of valid range." % k))
                        raise HTTPBadRequest(explanation=errmsg)
def _get_network_detail(req, cluster_id, networks_list):
all_network_list = []
if cluster_id:
all_network_list = registry.get_networks_detail(req.context, cluster_id)
if networks_list:
for net_id in networks_list:
network_detail = registry.get_network_metadata(req.context, net_id)
all_network_list.append(network_detail)
all_private_network_list = \
[network for network in all_network_list if network['network_type'] == "PRIVATE"]
return all_private_network_list
def _check_cluster_add_parameters(req, cluster_meta):
"""
                Check that the params in the given set are valid.
                :param req: http req
                :param cluster_meta: params set
                :return: error message
"""
                if 'nodes' in cluster_meta:
orig_keys = list(eval(cluster_meta['nodes']))
for host_id in orig_keys:
controller._raise_404_if_host_deleted(req, host_id)
                if 'networks' in cluster_meta:
orig_keys = list(eval(cluster_meta['networks']))
network_with_same_name = []
for network_id in orig_keys:
network_name = controller._raise_404_if_network_deleted(req, network_id)
if network_name in CLUSTER_DEFAULT_NETWORKS:
return (_("Network name %s of %s already exits"
" in the cluster, please check." %
(network_name, network_id)))
if network_name in network_with_same_name:
return (_("Network name can't be same with each other in 'networks[]', "
"please check."))
network_with_same_name.append(network_name)
                # check networking_parameters --------------------------------------------------
if cluster_meta.get('networking_parameters', None):
networking_parameters = eval(cluster_meta['networking_parameters'])
_check_param_nonull_and_valid(networking_parameters,
['segmentation_type'])
segmentation_type_set = networking_parameters['segmentation_type'].split(",")
for segmentation_type in segmentation_type_set:
if segmentation_type not in ['vlan', 'vxlan', 'flat', 'gre']:
return (_("Segmentation_type of networking_parameters is not valid."))
                        if segmentation_type == 'vxlan':
                            _check_param_nonull_and_valid(networking_parameters,
                                                          ['vni_range'])
                        elif segmentation_type == 'gre':
                            _check_param_nonull_and_valid(networking_parameters,
                                                          ['gre_id_range'])
vlan_range = networking_parameters.get("vlan_range", None)
vni_range = networking_parameters.get("vni_range", None)
gre_id_range = networking_parameters.get("gre_id_range", None)
#if (vlan_range and len(vlan_range) != 2) \
# or (vni_range and len(vni_range) != 2) \
# or (gre_id_range and len(gre_id_range) != 2):
# return (_("Range params must be pair."))
if vlan_range:
check_params_range(vlan_range, 'vlan')
if vni_range:
check_params_range(vni_range, 'vni')
if gre_id_range:
check_params_range(gre_id_range, 'gre_id')
# check logic_networks--------------------------------------------------
                subnet_name_set = []  # record all subnet names
                logic_network_name_set = []  # record all logic_network names
subnets_in_logic_network = {}
external_logic_network_name = []
if cluster_meta.get('logic_networks', None):
# get physnet_name list
all_private_cluster_networks_list = _get_network_detail(
req, cluster_id,
cluster_meta.get('networks', None)
if not isinstance(cluster_meta.get('networks', None), unicode)
else eval(cluster_meta.get('networks', None)))
if not all_private_cluster_networks_list:
LOG.info("Private network is empty in db, it lead logical network config invalid.")
physnet_name_set = [net['name'] for net in all_private_cluster_networks_list]
logic_networks = eval(cluster_meta['logic_networks'])
for logic_network in logic_networks:
subnets_in_logic_network[logic_network['name']] = []
# We force setting the physnet_name of flat logical network to 'flat'.
if logic_network.get('segmentation_type', None) == "flat":
if logic_network['physnet_name'] != "physnet1" or logic_network['type'] != "external":
LOG.info("When 'segmentation_type' is flat the 'physnet_name' and 'type' segmentation"
"must be 'physnet1'' and 'external'', but got '%s' and '%s'.We have changed"
"it to the valid value.")
logic_network['physnet_name'] = "physnet1"
logic_network['type'] = "external"
physnet_name_set.append("physnet1")
_check_param_nonull_and_valid(
logic_network,
['name', 'type', 'physnet_name', 'segmentation_type', 'shared', 'segmentation_id'],
{'segmentation_type' : networking_parameters['segmentation_type'],
'physnet_name' : ','.join(physnet_name_set),
'type' : ','.join(["external", "internal"])})
if logic_network['type'] == "external":
external_logic_network_name.append(logic_network['name'])
logic_network_name_set.append(logic_network['name'])
# By segmentation_type check segmentation_id is in range
segmentation_id = logic_network.get('segmentation_id', None)
if segmentation_id:
err = "Segmentation_id is out of private network %s of %s.Vaild range is [%s, %s]."
segmentation_type = logic_network.get('segmentation_type', None)
if 0 == cmp(segmentation_type, "vlan"):
private_vlan_range = \
[(net['vlan_start'], net['vlan_end'])
for net in all_private_cluster_networks_list
if logic_network['physnet_name'] == net['name']]
if private_vlan_range and \
not private_vlan_range[0][0] or \
not private_vlan_range[0][1]:
return (_("Private network plane %s don't config the 'vlan_start' or "
"'vlan_end' parameter."))
if int(segmentation_id) not in range(private_vlan_range[0][0], private_vlan_range[0][1]):
return (_(err % ("vlan_range", logic_network['physnet_name'],
private_vlan_range[0][0], private_vlan_range[0][1])))
elif 0 == cmp(segmentation_type, "vxlan") and vni_range:
if int(segmentation_id) not in range(vni_range[0], vni_range[1]):
return (_("Segmentation_id is out of vni_range."))
elif 0 == cmp(segmentation_type, "gre") and gre_id_range:
if int(segmentation_id) not in range(gre_id_range[0], gre_id_range[1]):
return (_("Segmentation_id is out of gre_id_range."))
                    # check subnets params --------------------------------------------------
if logic_network.get('subnets', None):
subnet_data = logic_network['subnets']
for subnet in subnet_data:
_check_param_nonull_and_valid(
subnet,
['name', 'cidr'])
subnet_name_set.append(subnet['name'])
                                # By cidr, check that floating_ranges are in
                                # range and do not overlap.
if subnet['gateway'] and not _is_in_network_range(subnet['gateway'], subnet['cidr']):
return (_("Wrong gateway format."))
if subnet['floating_ranges']:
                                    inter_ip = lambda x: '.'.join(
                                        [str(x / (256 ** i) % 256) for i in range(3, -1, -1)])
floating_ranges_with_int_ip = list()
sorted_floating_ranges = list()
sorted_floating_ranges_with_int_ip = list()
for floating_ip in subnet['floating_ranges']:
if len(floating_ip) != 2:
return (_("Floating ip must be paris."))
ip_start = _ip_into_int(floating_ip[0])
ip_end = _ip_into_int(floating_ip[1])
if ip_start > ip_end:
return (_("Wrong floating ip format."))
floating_ranges_with_int_ip.append([ip_start, ip_end])
                                    sorted_floating_ranges_with_int_ip = sorted(
                                        floating_ranges_with_int_ip, key=lambda x: x[0])
for ip_range in sorted_floating_ranges_with_int_ip:
ip_start = inter_ip(ip_range[0])
ip_end = inter_ip(ip_range[1])
sorted_floating_ranges.append([ip_start, ip_end])
last_rang_ip = []
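                                    # Ranges are visited in ascending start order,
                                    # so an overlap exists iff a range starts below
                                    # a previously seen end IP (e.g. [10, 20]
                                    # followed by [15, 30]).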
for floating in sorted_floating_ranges:
if not _is_in_network_range(floating[0], subnet['cidr']) \
or not _is_in_network_range(floating[1], subnet['cidr']):
return (_("Floating ip or gateway is out of range cidr."))
err_list = [err for err in last_rang_ip if _ip_into_int(floating[0]) < err]
if last_rang_ip and 0 < len(err_list):
return (_("Between floating ip range can not be overlap."))
last_rang_ip.append(_ip_into_int(floating[1]))
subnets_in_logic_network[logic_network['name']].append(subnet['name'])
# check external logical network uniqueness
if len(external_logic_network_name) > 1:
return (_("External logical network is uniqueness in the cluster.Got %s." %
",".join(external_logic_network_name)))
# check logic_network_name uniqueness
if len(logic_network_name_set) != len(set(logic_network_name_set)):
return (_("Logic network name segment is repetition."))
# check subnet_name uniqueness
if len(subnet_name_set) != len(set(subnet_name_set)):
return (_("Subnet name segment is repetition."))
cluster_meta['logic_networks'] = unicode(logic_networks)
# check routers--------------------------------------------------
subnet_name_set_deepcopy = copy.deepcopy(subnet_name_set)
router_name_set = [] # record all routers name
if cluster_meta.get('routers', None):
router_data = eval(cluster_meta['routers'])
for router in router_data:
_check_param_nonull_and_valid(router, ['name'])
                        # check that the related logic_network is valid
external_logic_network_data = router.get('external_logic_network', None)
if external_logic_network_data and \
external_logic_network_data not in logic_network_name_set:
return (_("Logic_network %s is not valid range." % external_logic_network_data))
router_name_set.append(router['name'])
                        # check that the related subnets are valid
for subnet in router.get('subnets', []):
if subnet not in subnet_name_set:
return (_("Subnet %s is not valid range." % subnet))
                            # a subnet can't be related to two routers
if subnet not in subnet_name_set_deepcopy:
return (_("The subnet can't be related with multiple routers."))
subnet_name_set_deepcopy.remove(subnet)
if external_logic_network_data and \
subnets_in_logic_network[external_logic_network_data] and \
set(subnets_in_logic_network[external_logic_network_data]). \
issubset(set(router['subnets'])):
return (_("Logic network's subnets is all related with a router, it's not allowed."))
                # check router_name uniqueness
if len(router_name_set) != len(set(router_name_set)):
return (_("Router name segment is repetition."))
return (_("I'm params checker."))
_check_auto_scale(req, cluster_meta)
check_result = _check_cluster_add_parameters(req, cluster_meta)
            if check_result != errmsg:
                LOG.exception(_("Params check for cluster-add or "
                                "cluster-update failed!"))
raise HTTPBadRequest(explanation=check_result)
LOG.debug(_("Params check for cluster-add or cluster-update is done!"))
return f(*args, **kwargs)
return wrapper
def __init__(self):
self.notifier = notifier.Notifier()
registry.configure_registry_client()
self.policy = policy.Enforcer()
if property_utils.is_property_protection_enabled():
self.prop_enforcer = property_utils.PropertyRules(self.policy)
else:
self.prop_enforcer = None
def _enforce(self, req, action, target=None):
"""Authorize an action against our policies"""
if target is None:
target = {}
try:
self.policy.enforce(req.context, action, target)
except exception.Forbidden:
raise HTTPForbidden()
def _raise_404_if_host_deleted(self, req, host_id):
host = self.get_host_meta_or_404(req, host_id)
if host['deleted']:
msg = _("Host with identifier %s has been deleted.") % host_id
raise HTTPNotFound(msg)
def _raise_404_if_network_deleted(self, req, network_id):
network = self.get_network_meta_or_404(req, network_id)
if network['deleted']:
msg = _("Network with identifier %s has been deleted.") % network_id
raise HTTPNotFound(msg)
return network.get('name', None)
def _get_filters(self, req):
"""
Return a dictionary of query param filters from the request
:param req: the Request object coming from the wsgi layer
:retval a dict of key/value filters
"""
query_filters = {}
for param in req.params:
if param in SUPPORTED_FILTERS:
query_filters[param] = req.params.get(param)
if not filters.validate(param, query_filters[param]):
raise HTTPBadRequest(_('Bad value passed to filter '
'%(filter)s got %(val)s')
% {'filter': param,
'val': query_filters[param]})
return query_filters
def _get_query_params(self, req):
"""
Extracts necessary query params from request.
:param req: the WSGI Request object
:retval dict of parameters that can be used by registry client
"""
params = {'filters': self._get_filters(req)}
for PARAM in SUPPORTED_PARAMS:
if PARAM in req.params:
params[PARAM] = req.params.get(PARAM)
return params
@utils.mutating
@check_params
def add_cluster(self, req, cluster_meta):
"""
Adds a new cluster to Daisy.
:param req: The WSGI/Webob Request object
        :param cluster_meta: Mapping of metadata about cluster
:raises HTTPBadRequest if x-cluster-name is missing
"""
self._enforce(req, 'add_cluster')
cluster_name = cluster_meta["name"]
if not cluster_name:
raise ValueError('cluster name is null!')
cluster_name_split = cluster_name.split('_')
        for cluster_name_info in cluster_name_split:
            if not cluster_name_info.isalnum():
                raise ValueError('cluster name must consist of numbers, '
                                 'letters or underscores!')
if cluster_meta.get('nodes', None):
orig_keys = list(eval(cluster_meta['nodes']))
for host_id in orig_keys:
self._raise_404_if_host_deleted(req, host_id)
node = registry.get_host_metadata(req.context, host_id)
if node['status'] == 'in-cluster':
msg = _("Forbidden to add host %s with status "
"'in-cluster' in another cluster") % host_id
raise HTTPForbidden(explanation=msg)
if node.get('interfaces', None):
interfaces = node['interfaces']
input_host_pxe_info = [interface for interface in interfaces
if interface.get('is_deployment', None) == 1]
if not input_host_pxe_info and node.get('os_status',None) != 'active':
msg = _("The host %s has more than one dhcp server, "
"please choose one interface for deployment") % host_id
raise HTTPServerError(explanation=msg)
        LOG.debug("add cluster: %s" % cluster_name)
        LOG.debug("cluster meta: %s" % cluster_meta)
cluster_meta = registry.add_cluster_metadata(req.context, cluster_meta)
return {'cluster_meta': cluster_meta}
@utils.mutating
def delete_cluster(self, req, id):
"""
Deletes a cluster from Daisy.
:param req: The WSGI/Webob Request object
        :param id: The opaque cluster identifier
        :raises HTTPNotFound if the cluster does not exist
"""
self._enforce(req, 'delete_cluster')
#cluster = self.get_cluster_meta_or_404(req, id)
print "delete_cluster:%s" % id
try:
registry.delete_cluster_metadata(req.context, id)
except exception.NotFound as e:
msg = (_("Failed to find cluster to delete: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPNotFound(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden to delete cluster: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except exception.InUseByStore as e:
msg = (_("cluster %(id)s could not be deleted because it is in use: "
"%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)})
LOG.warn(msg)
raise HTTPConflict(explanation=msg,
request=req,
content_type="text/plain")
else:
#self.notifier.info('cluster.delete', cluster)
return Response(body='', status=200)
@utils.mutating
def get_cluster(self, req, id):
"""
Returns metadata about an cluster in the HTTP headers of the
response object
:param req: The WSGI/Webob Request object
:param id: The opaque cluster identifier
:raises HTTPNotFound if cluster metadata is not available to user
"""
self._enforce(req, 'get_cluster')
cluster_meta = self.get_cluster_meta_or_404(req, id)
return {'cluster_meta': cluster_meta}
def detail(self, req):
"""
Returns detailed information for all available clusters
:param req: The WSGI/Webob Request object
:retval The response body is a mapping of the following form::
{'clusters': [
{'id': <ID>,
'name': <NAME>,
'nodes': <NODES>,
'networks': <NETWORKS>,
'description': <DESCRIPTION>,
'created_at': <TIMESTAMP>,
'updated_at': <TIMESTAMP>,
'deleted_at': <TIMESTAMP>|<NONE>,}, ...
]}
"""
self._enforce(req, 'get_clusters')
params = self._get_query_params(req)
try:
clusters = registry.get_clusters_detail(req.context, **params)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return dict(clusters=clusters)
@utils.mutating
@check_params
def update_cluster(self, req, id, cluster_meta):
"""
Updates an existing cluster with the registry.
:param request: The WSGI/Webob Request object
:param id: The opaque cluster identifier
:retval Returns the updated cluster information as a mapping
"""
self._enforce(req, 'update_cluster')
        if 'nodes' in cluster_meta:
orig_keys = list(eval(cluster_meta['nodes']))
for host_id in orig_keys:
self._raise_404_if_host_deleted(req, host_id)
node = registry.get_host_metadata(req.context, host_id)
if node['status'] == 'in-cluster':
host_cluster = registry.get_host_clusters(req.context, host_id)
if host_cluster[0]['cluster_id'] != id:
msg = _("Forbidden to add host %s with status "
"'in-cluster' in another cluster") % host_id
raise HTTPForbidden(explanation=msg)
if node.get('interfaces', None):
interfaces = node['interfaces']
input_host_pxe_info = [interface for interface in interfaces
if interface.get('is_deployment', None) == 1]
if not input_host_pxe_info and node.get('os_status', None) != 'active':
msg = _("The host %s has more than one dhcp server, "
"please choose one interface for deployment") % host_id
raise HTTPServerError(explanation=msg)
        if 'networks' in cluster_meta:
orig_keys = list(eval(cluster_meta['networks']))
for network_id in orig_keys:
self._raise_404_if_network_deleted(req, network_id)
orig_cluster_meta = self.get_cluster_meta_or_404(req, id)
# Do not allow any updates on a deleted cluster.
# Fix for LP Bug #1060930
if orig_cluster_meta['deleted']:
msg = _("Forbidden to update deleted cluster.")
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
try:
cluster_meta = registry.update_cluster_metadata(req.context,
id,
cluster_meta)
except exception.Invalid as e:
msg = (_("Failed to update cluster metadata. Got error: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
except exception.NotFound as e:
msg = (_("Failed to find cluster to update: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPNotFound(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden to update cluster: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except (exception.Conflict, exception.Duplicate) as e:
LOG.warn(utils.exception_to_str(e))
raise HTTPConflict(body=_('Cluster operation conflicts'),
request=req,
content_type='text/plain')
else:
self.notifier.info('cluster.update', cluster_meta)
return {'cluster_meta': cluster_meta}
class ProjectDeserializer(wsgi.JSONRequestDeserializer):
"""Handles deserialization of specific controller method requests."""
def _deserialize(self, request):
result = {}
result["cluster_meta"] = utils.get_cluster_meta(request)
return result
def add_cluster(self, request):
return self._deserialize(request)
def update_cluster(self, request):
return self._deserialize(request)
class ProjectSerializer(wsgi.JSONResponseSerializer):
"""Handles serialization of specific controller method responses."""
def __init__(self):
self.notifier = notifier.Notifier()
def add_cluster(self, response, result):
cluster_meta = result['cluster_meta']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(cluster=cluster_meta))
return response
def update_cluster(self, response, result):
cluster_meta = result['cluster_meta']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(cluster=cluster_meta))
return response
def delete_cluster(self, response, result):
cluster_meta = result['cluster_meta']
        response.status = 200
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(cluster=cluster_meta))
return response
def get_cluster(self, response, result):
cluster_meta = result['cluster_meta']
        response.status = 200
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(cluster=cluster_meta))
return response
def create_resource():
"""Projects resource factory method"""
deserializer = ProjectDeserializer()
serializer = ProjectSerializer()
return wsgi.Resource(Controller(), deserializer, serializer)
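# Illustrative request against this endpoint (the URL shape follows the
# Controller docstring; the exact body schema is whatever
# utils.get_cluster_meta accepts in a given deployment):
#
#     POST /v1/clusters HTTP/1.1
#     Content-Type: application/json
#
#     {"name": "cluster_1", "description": "demo"}
#
# On success the API returns 201 with the stored metadata serialized as
# {"cluster": {...}}.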


@ -0,0 +1,328 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/components endpoint for Daisy v1 API
"""
from oslo_config import cfg
from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPConflict
from webob.exc import HTTPForbidden
from webob.exc import HTTPNotFound
from webob import Response
from daisy.api import policy
import daisy.api.v1
from daisy.api.v1 import controller
from daisy.api.v1 import filters
from daisy.common import exception
from daisy.common import property_utils
from daisy.common import utils
from daisy.common import wsgi
from daisy import i18n
from daisy import notifier
import daisy.registry.client.v1.api as registry
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS
SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS
ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE
CONF = cfg.CONF
CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format')
CONF.import_opt('container_formats', 'daisy.common.config',
group='image_format')
CONF.import_opt('image_property_quota', 'daisy.common.config')
class Controller(controller.BaseController):
"""
WSGI controller for components resource in Daisy v1 API
The components resource API is a RESTful web service for component data. The API
is as follows::
GET /components -- Returns a set of brief metadata about components
GET /components/detail -- Returns a set of detailed metadata about
components
    HEAD /components/<ID> -- Return metadata about a component with id <ID>
GET /components/<ID> -- Return component data for component with id <ID>
POST /components -- Store component data and return metadata about the
newly-stored component
PUT /components/<ID> -- Update component metadata and/or upload component
data for a previously-reserved component
DELETE /components/<ID> -- Delete the component with id <ID>
"""
def __init__(self):
self.notifier = notifier.Notifier()
registry.configure_registry_client()
self.policy = policy.Enforcer()
if property_utils.is_property_protection_enabled():
self.prop_enforcer = property_utils.PropertyRules(self.policy)
else:
self.prop_enforcer = None
def _enforce(self, req, action, target=None):
"""Authorize an action against our policies"""
if target is None:
target = {}
try:
self.policy.enforce(req.context, action, target)
except exception.Forbidden:
raise HTTPForbidden()
def _get_filters(self, req):
"""
Return a dictionary of query param filters from the request
:param req: the Request object coming from the wsgi layer
:retval a dict of key/value filters
"""
query_filters = {}
for param in req.params:
if param in SUPPORTED_FILTERS:
query_filters[param] = req.params.get(param)
if not filters.validate(param, query_filters[param]):
raise HTTPBadRequest(_('Bad value passed to filter '
'%(filter)s got %(val)s')
% {'filter': param,
'val': query_filters[param]})
return query_filters
def _get_query_params(self, req):
"""
Extracts necessary query params from request.
:param req: the WSGI Request object
:retval dict of parameters that can be used by registry client
"""
params = {'filters': self._get_filters(req)}
for PARAM in SUPPORTED_PARAMS:
if PARAM in req.params:
params[PARAM] = req.params.get(PARAM)
return params
@utils.mutating
def add_component(self, req, component_meta):
"""
Adds a new component to Daisy.
:param req: The WSGI/Webob Request object
        :param component_meta: Mapping of metadata about component
:raises HTTPBadRequest if x-component-name is missing
"""
self._enforce(req, 'add_component')
#component_id=component_meta["id"]
#component_owner=component_meta["owner"]
component_name = component_meta["name"]
component_description = component_meta["description"]
#print component_id
#print component_owner
print component_name
print component_description
component_meta = registry.add_component_metadata(req.context, component_meta)
return {'component_meta': component_meta}
@utils.mutating
def delete_component(self, req, id):
"""
Deletes a component from Daisy.
:param req: The WSGI/Webob Request object
        :param id: The opaque component identifier
        :raises HTTPNotFound if the component does not exist
"""
self._enforce(req, 'delete_component')
#component = self.get_component_meta_or_404(req, id)
print "delete_component:%s" % id
try:
registry.delete_component_metadata(req.context, id)
except exception.NotFound as e:
msg = (_("Failed to find component to delete: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPNotFound(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden to delete component: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except exception.InUseByStore as e:
msg = (_("component %(id)s could not be deleted because it is in use: "
"%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)})
LOG.warn(msg)
raise HTTPConflict(explanation=msg,
request=req,
content_type="text/plain")
else:
#self.notifier.info('component.delete', component)
return Response(body='', status=200)
@utils.mutating
def get_component(self, req, id):
"""
        Returns metadata about a component in the HTTP headers of the
response object
:param req: The WSGI/Webob Request object
:param id: The opaque component identifier
:raises HTTPNotFound if component metadata is not available to user
"""
self._enforce(req, 'get_component')
component_meta = self.get_component_meta_or_404(req, id)
return {'component_meta': component_meta}
def detail(self, req):
"""
Returns detailed information for all available components
:param req: The WSGI/Webob Request object
:retval The response body is a mapping of the following form::
{'components': [
{'id': <ID>,
'name': <NAME>,
'description': <DESCRIPTION>,
'created_at': <TIMESTAMP>,
'updated_at': <TIMESTAMP>,
'deleted_at': <TIMESTAMP>|<NONE>,}, ...
]}
"""
self._enforce(req, 'get_components')
params = self._get_query_params(req)
try:
components = registry.get_components_detail(req.context, **params)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return dict(components=components)
@utils.mutating
def update_component(self, req, id, component_meta):
"""
Updates an existing component with the registry.
:param request: The WSGI/Webob Request object
        :param id: The opaque component identifier
        :retval Returns the updated component information as a mapping
"""
self._enforce(req, 'modify_image')
orig_component_meta = self.get_component_meta_or_404(req, id)
        # Do not allow any updates on a deleted component.
# Fix for LP Bug #1060930
if orig_component_meta['deleted']:
msg = _("Forbidden to update deleted component.")
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
try:
component_meta = registry.update_component_metadata(req.context,
id,
component_meta)
except exception.Invalid as e:
msg = (_("Failed to update component metadata. Got error: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
except exception.NotFound as e:
msg = (_("Failed to find component to update: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPNotFound(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden to update component: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except (exception.Conflict, exception.Duplicate) as e:
LOG.warn(utils.exception_to_str(e))
            raise HTTPConflict(body=_('Component operation conflicts'),
request=req,
content_type='text/plain')
else:
self.notifier.info('component.update', component_meta)
return {'component_meta': component_meta}
class ComponentDeserializer(wsgi.JSONRequestDeserializer):
"""Handles deserialization of specific controller method requests."""
def _deserialize(self, request):
result = {}
result["component_meta"] = utils.get_component_meta(request)
return result
def add_component(self, request):
return self._deserialize(request)
def update_component(self, request):
return self._deserialize(request)
class ComponentSerializer(wsgi.JSONResponseSerializer):
"""Handles serialization of specific controller method responses."""
def __init__(self):
self.notifier = notifier.Notifier()
def add_component(self, response, result):
component_meta = result['component_meta']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(component=component_meta))
return response
def delete_component(self, response, result):
component_meta = result['component_meta']
        response.status = 200
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(component=component_meta))
return response
def get_component(self, response, result):
component_meta = result['component_meta']
        response.status = 200
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(component=component_meta))
return response
def create_resource():
"""Components resource factory method"""
deserializer = ComponentDeserializer()
serializer = ComponentSerializer()
return wsgi.Resource(Controller(), deserializer, serializer)


@ -0,0 +1,325 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/config_files endpoint for Daisy v1 API
"""
from oslo_config import cfg
from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPConflict
from webob.exc import HTTPForbidden
from webob.exc import HTTPNotFound
from webob import Response
from daisy.api import policy
import daisy.api.v1
from daisy.api.v1 import controller
from daisy.api.v1 import filters
from daisy.common import exception
from daisy.common import property_utils
from daisy.common import utils
from daisy.common import wsgi
from daisy import i18n
from daisy import notifier
import daisy.registry.client.v1.api as registry
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS
SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS
ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE
CONF = cfg.CONF
CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format')
CONF.import_opt('container_formats', 'daisy.common.config',
group='image_format')
CONF.import_opt('image_property_quota', 'daisy.common.config')
class Controller(controller.BaseController):
"""
WSGI controller for config_files resource in Daisy v1 API
The config_files resource API is a RESTful web service for config_file data. The API
is as follows::
GET /config_files -- Returns a set of brief metadata about config_files
GET /config_files/detail -- Returns a set of detailed metadata about
config_files
    HEAD /config_files/<ID> -- Return metadata about a config_file with id <ID>
GET /config_files/<ID> -- Return config_file data for config_file with id <ID>
POST /config_files -- Store config_file data and return metadata about the
newly-stored config_file
PUT /config_files/<ID> -- Update config_file metadata and/or upload config_file
data for a previously-reserved config_file
DELETE /config_files/<ID> -- Delete the config_file with id <ID>
"""
def __init__(self):
self.notifier = notifier.Notifier()
registry.configure_registry_client()
self.policy = policy.Enforcer()
if property_utils.is_property_protection_enabled():
self.prop_enforcer = property_utils.PropertyRules(self.policy)
else:
self.prop_enforcer = None
def _enforce(self, req, action, target=None):
"""Authorize an action against our policies"""
if target is None:
target = {}
try:
self.policy.enforce(req.context, action, target)
except exception.Forbidden:
raise HTTPForbidden()
def _get_filters(self, req):
"""
Return a dictionary of query param filters from the request
:param req: the Request object coming from the wsgi layer
:retval a dict of key/value filters
"""
query_filters = {}
for param in req.params:
if param in SUPPORTED_FILTERS:
query_filters[param] = req.params.get(param)
if not filters.validate(param, query_filters[param]):
raise HTTPBadRequest(_('Bad value passed to filter '
'%(filter)s got %(val)s')
% {'filter': param,
'val': query_filters[param]})
return query_filters
def _get_query_params(self, req):
"""
Extracts necessary query params from request.
:param req: the WSGI Request object
:retval dict of parameters that can be used by registry client
"""
params = {'filters': self._get_filters(req)}
for PARAM in SUPPORTED_PARAMS:
if PARAM in req.params:
params[PARAM] = req.params.get(PARAM)
return params
@utils.mutating
def add_config_file(self, req, config_file_meta):
"""
Adds a new config_file to Daisy.
:param req: The WSGI/Webob Request object
:param config_file_meta: Mapping of metadata about config_file
:raises HTTPBadRequest if x-config_file-name is missing
"""
self._enforce(req, 'add_config_file')
config_file_name = config_file_meta["name"]
config_file_description = config_file_meta["description"]
LOG.debug("config_file name: %s", config_file_name)
LOG.debug("config_file description: %s", config_file_description)
config_file_meta = registry.add_config_file_metadata(req.context, config_file_meta)
return {'config_file_meta': config_file_meta}
@utils.mutating
def delete_config_file(self, req, id):
"""
Deletes a config_file from Daisy.
:param req: The WSGI/Webob Request object
:param id: The opaque config_file identifier
:raises HTTPNotFound if the config_file does not exist
"""
self._enforce(req, 'delete_config_file')
try:
registry.delete_config_file_metadata(req.context, id)
except exception.NotFound as e:
msg = (_("Failed to find config_file to delete: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPNotFound(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden to delete config_file: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except exception.InUseByStore as e:
msg = (_("config_file %(id)s could not be deleted because it is in use: "
"%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)})
LOG.warn(msg)
raise HTTPConflict(explanation=msg,
request=req,
content_type="text/plain")
else:
#self.notifier.info('config_file.delete', config_file)
return Response(body='', status=200)
@utils.mutating
def get_config_file(self, req, id):
"""
Returns metadata about a config_file in the HTTP headers of the
response object
:param req: The WSGI/Webob Request object
:param id: The opaque config_file identifier
:raises HTTPNotFound if config_file metadata is not available to user
"""
self._enforce(req, 'get_config_file')
config_file_meta = self.get_config_file_meta_or_404(req, id)
return {'config_file_meta': config_file_meta}
def detail(self, req):
"""
Returns detailed information for all available config_files
:param req: The WSGI/Webob Request object
:retval The response body is a mapping of the following form::
{'config_files': [
{'id': <ID>,
'name': <NAME>,
'description': <DESCRIPTION>,
'created_at': <TIMESTAMP>,
'updated_at': <TIMESTAMP>,
'deleted_at': <TIMESTAMP>|<NONE>,}, ...
]}
"""
self._enforce(req, 'get_config_files')
params = self._get_query_params(req)
try:
config_files = registry.get_config_files_detail(req.context, **params)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return dict(config_files=config_files)
@utils.mutating
def update_config_file(self, req, id, config_file_meta):
"""
Updates an existing config_file with the registry.
:param req: The WSGI/Webob Request object
:param id: The opaque config_file identifier
:retval Returns the updated config_file information as a mapping
"""
self._enforce(req, 'modify_image')
orig_config_file_meta = self.get_config_file_meta_or_404(req, id)
# Do not allow any updates on a deleted config_file.
# Fix for LP Bug #1060930
if orig_config_file_meta['deleted']:
msg = _("Forbidden to update deleted config_file.")
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
try:
config_file_meta = registry.update_config_file_metadata(req.context,
id,
config_file_meta)
except exception.Invalid as e:
msg = (_("Failed to update config_file metadata. Got error: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
except exception.NotFound as e:
msg = (_("Failed to find config_file to update: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPNotFound(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden to update config_file: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except (exception.Conflict, exception.Duplicate) as e:
LOG.warn(utils.exception_to_str(e))
raise HTTPConflict(body=_('config_file operation conflicts'),
request=req,
content_type='text/plain')
else:
self.notifier.info('config_file.update', config_file_meta)
return {'config_file_meta': config_file_meta}
class Config_fileDeserializer(wsgi.JSONRequestDeserializer):
"""Handles deserialization of specific controller method requests."""
def _deserialize(self, request):
result = {}
result["config_file_meta"] = utils.get_config_file_meta(request)
return result
def add_config_file(self, request):
return self._deserialize(request)
def update_config_file(self, request):
return self._deserialize(request)
class Config_fileSerializer(wsgi.JSONResponseSerializer):
"""Handles serialization of specific controller method responses."""
def __init__(self):
self.notifier = notifier.Notifier()
def add_config_file(self, response, result):
config_file_meta = result['config_file_meta']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(config_file=config_file_meta))
return response
def delete_config_file(self, response, result):
config_file_meta = result['config_file_meta']
response.status = 200
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(config_file=config_file_meta))
return response
def get_config_file(self, response, result):
config_file_meta = result['config_file_meta']
response.status = 200
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(config_file=config_file_meta))
return response
def create_resource():
"""config_files resource factory method"""
deserializer = Config_fileDeserializer()
serializer = Config_fileSerializer()
return wsgi.Resource(Controller(), deserializer, serializer)
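# Illustrative usage sketch (not part of the module): querying this resource
# over HTTP with the standard library. The daisy-api host and port below are
# assumptions, not values defined in this file.
#
#     import json
#     import urllib2
#     resp = urllib2.urlopen('http://127.0.0.1:19292/v1/config_files/detail')
#     config_files = json.loads(resp.read())['config_files']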


@ -0,0 +1,434 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/config_sets endpoint for Daisy v1 API
"""
from oslo_config import cfg
from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPConflict
from webob.exc import HTTPForbidden
from webob.exc import HTTPNotFound
from webob import Response
from daisy.api import policy
import daisy.api.v1
from daisy.api.v1 import controller
from daisy.api.v1 import filters
from daisy.common import exception
from daisy.common import property_utils
from daisy.common import utils
from daisy.common import wsgi
from daisy import i18n
from daisy import notifier
import daisy.registry.client.v1.api as registry
from daisy.api.configset import manager
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS
SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS
ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE
CONF = cfg.CONF
CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format')
CONF.import_opt('container_formats', 'daisy.common.config',
group='image_format')
CONF.import_opt('image_property_quota', 'daisy.common.config')
class Controller(controller.BaseController):
"""
WSGI controller for config_sets resource in Daisy v1 API
The config_sets resource API is a RESTful web service for config_set data. The API
is as follows::
GET /config_sets -- Returns a set of brief metadata about config_sets
GET /config_sets/detail -- Returns a set of detailed metadata about
config_sets
HEAD /config_sets/<ID> -- Return metadata about a config_set with id <ID>
GET /config_sets/<ID> -- Return config_set data for config_set with id <ID>
POST /config_sets -- Store config_set data and return metadata about the
newly-stored config_set
PUT /config_sets/<ID> -- Update config_set metadata and/or upload config_set
data for a previously-reserved config_set
DELETE /config_sets/<ID> -- Delete the config_set with id <ID>
"""
def __init__(self):
self.notifier = notifier.Notifier()
registry.configure_registry_client()
self.policy = policy.Enforcer()
if property_utils.is_property_protection_enabled():
self.prop_enforcer = property_utils.PropertyRules(self.policy)
else:
self.prop_enforcer = None
def _enforce(self, req, action, target=None):
"""Authorize an action against our policies"""
if target is None:
target = {}
try:
self.policy.enforce(req.context, action, target)
except exception.Forbidden:
raise HTTPForbidden()
def _get_filters(self, req):
"""
Return a dictionary of query param filters from the request
:param req: the Request object coming from the wsgi layer
:retval a dict of key/value filters
"""
query_filters = {}
for param in req.params:
if param in SUPPORTED_FILTERS:
query_filters[param] = req.params.get(param)
if not filters.validate(param, query_filters[param]):
raise HTTPBadRequest(_('Bad value passed to filter '
'%(filter)s got %(val)s')
% {'filter': param,
'val': query_filters[param]})
return query_filters
def _get_query_params(self, req):
"""
Extracts necessary query params from request.
:param req: the WSGI Request object
:retval dict of parameters that can be used by registry client
"""
params = {'filters': self._get_filters(req)}
for PARAM in SUPPORTED_PARAMS:
if PARAM in req.params:
params[PARAM] = req.params.get(PARAM)
return params
def _raise_404_if_cluster_deleted(self, req, cluster_id):
cluster = self.get_cluster_meta_or_404(req, cluster_id)
if cluster['deleted']:
msg = _("cluster with identifier %s has been deleted.") % cluster_id
raise HTTPNotFound(msg)
@utils.mutating
def add_config_set(self, req, config_set_meta):
"""
Adds a new config_set to Daisy.
:param req: The WSGI/Webob Request object
:param config_set_meta: Mapping of metadata about config_set
:raises HTTPBadRequest if x-config_set-name is missing
"""
self._enforce(req, 'add_config_set')
config_set_name = config_set_meta["name"]
config_set_description = config_set_meta["description"]
LOG.debug("config_set name: %s", config_set_name)
LOG.debug("config_set description: %s", config_set_description)
config_set_meta = registry.add_config_set_metadata(req.context, config_set_meta)
return {'config_set_meta': config_set_meta}
@utils.mutating
def delete_config_set(self, req, id):
"""
Deletes a config_set from Daisy.
:param req: The WSGI/Webob Request object
:param id: The opaque config_set identifier
:raises HTTPNotFound if the config_set does not exist
"""
self._enforce(req, 'delete_config_set')
try:
registry.delete_config_set_metadata(req.context, id)
except exception.NotFound as e:
msg = (_("Failed to find config_set to delete: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPNotFound(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden to delete config_set: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except exception.InUseByStore as e:
msg = (_("config_set %(id)s could not be deleted because it is in use: "
"%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)})
LOG.warn(msg)
raise HTTPConflict(explanation=msg,
request=req,
content_type="text/plain")
else:
#self.notifier.info('config_set.delete', config_set)
return Response(body='', status=200)
@utils.mutating
def get_config_set(self, req, id):
"""
Returns metadata about a config_set in the HTTP headers of the
response object
:param req: The WSGI/Webob Request object
:param id: The opaque config_set identifier
:raises HTTPNotFound if config_set metadata is not available to user
"""
self._enforce(req, 'get_config_set')
config_set_meta = self.get_config_set_meta_or_404(req, id)
return {'config_set_meta': config_set_meta}
def detail(self, req):
"""
Returns detailed information for all available config_sets
:param req: The WSGI/Webob Request object
:retval The response body is a mapping of the following form::
{'config_sets': [
{'id': <ID>,
'name': <NAME>,
'description': <DESCRIPTION>,
'created_at': <TIMESTAMP>,
'updated_at': <TIMESTAMP>,
'deleted_at': <TIMESTAMP>|<NONE>,}, ...
]}
"""
self._enforce(req, 'get_config_sets')
params = self._get_query_params(req)
try:
config_sets = registry.get_config_sets_detail(req.context, **params)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return dict(config_sets=config_sets)
@utils.mutating
def update_config_set(self, req, id, config_set_meta):
"""
Updates an existing config_set with the registry.
:param req: The WSGI/Webob Request object
:param id: The opaque config_set identifier
:retval Returns the updated config_set information as a mapping
"""
self._enforce(req, 'modify_image')
orig_config_set_meta = self.get_config_set_meta_or_404(req, id)
# Do not allow any updates on a deleted config_set.
# Fix for LP Bug #1060930
if orig_config_set_meta['deleted']:
msg = _("Forbidden to update deleted config_set.")
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
try:
config_set_meta = registry.update_config_set_metadata(req.context,
id,
config_set_meta)
except exception.Invalid as e:
msg = (_("Failed to update config_set metadata. Got error: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
except exception.NotFound as e:
msg = (_("Failed to find config_set to update: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPNotFound(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden to update config_set: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except (exception.Conflict, exception.Duplicate) as e:
LOG.warn(utils.exception_to_str(e))
raise HTTPConflict(body=_('config_set operation conflicts'),
request=req,
content_type='text/plain')
else:
self.notifier.info('config_set.update', config_set_meta)
return {'config_set_meta': config_set_meta}
def _raise_404_if_role_exist(self,req,config_set_meta):
role_id_list=[]
try:
roles = registry.get_roles_detail(req.context)
for role in roles:
for role_name in eval(config_set_meta['role']):
if role['cluster_id'] == config_set_meta['cluster'] and role['name'] == role_name:
role_id_list.append(role['id'])
break
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return role_id_list
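# Note: config_set_meta['role'] is expected to be a string that eval()s to
# a list of role names, e.g. "['CONTROLLER_HA', 'COMPUTER']" (example names
# only); _raise_404_if_role_exist maps them to role ids within the cluster.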
@utils.mutating
def cluster_config_set_update(self, req, config_set_meta):
if 'cluster' in config_set_meta:
orig_cluster = str(config_set_meta['cluster'])
self._raise_404_if_cluster_deleted(req, orig_cluster)
try:
if config_set_meta.get('role',None):
role_id_list=self._raise_404_if_role_exist(req,config_set_meta)
if len(role_id_list) == len(eval(config_set_meta['role'])):
for role_id in role_id_list:
backend=manager.configBackend('clushshell', req, role_id)
backend.push_config()
else:
msg = "the role is not exist"
LOG.error(msg)
raise HTTPNotFound(msg)
else:
roles = registry.get_roles_detail(req.context)
for role in roles:
if role['cluster_id'] == config_set_meta['cluster']:
backend=manager.configBackend('clushshell', req, role['id'])
backend.push_config()
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
config_status={"status":"config successful"}
return {'config_set':config_status}
else:
msg = "the cluster is not exist"
LOG.error(msg)
raise HTTPNotFound(msg)
@utils.mutating
def cluster_config_set_progress(self, req, config_set_meta):
role_list = []
if 'cluster' in config_set_meta:
orig_cluster = str(config_set_meta['cluster'])
self._raise_404_if_cluster_deleted(req, orig_cluster)
try:
if config_set_meta.get('role',None):
role_id_list=self._raise_404_if_role_exist(req,config_set_meta)
if len(role_id_list) == len(eval(config_set_meta['role'])):
for role_id in role_id_list:
role_info = {}
role_meta=registry.get_role_metadata(req.context, role_id)
role_info['role-name']=role_meta['name']
role_info['config_set_update_progress']=role_meta['config_set_update_progress']
role_list.append(role_info)
else:
msg = "the role is not exist"
LOG.error(msg)
raise HTTPNotFound(msg)
else:
roles = registry.get_roles_detail(req.context)
for role in roles:
if role['cluster_id'] == config_set_meta['cluster']:
role_info = {}
role_info['role-name']=role['name']
role_info['config_set_update_progress']=role['config_set_update_progress']
role_list.append(role_info)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return role_list
else:
msg = "the cluster is not exist"
LOG.error(msg)
raise HTTPNotFound(msg)
class Config_setDeserializer(wsgi.JSONRequestDeserializer):
"""Handles deserialization of specific controller method requests."""
def _deserialize(self, request):
result = {}
result["config_set_meta"] = utils.get_config_set_meta(request)
return result
def add_config_set(self, request):
return self._deserialize(request)
def update_config_set(self, request):
return self._deserialize(request)
def cluster_config_set_update(self, request):
return self._deserialize(request)
def cluster_config_set_progress(self, request):
return self._deserialize(request)
class Config_setSerializer(wsgi.JSONResponseSerializer):
"""Handles serialization of specific controller method responses."""
def __init__(self):
self.notifier = notifier.Notifier()
def add_config_set(self, response, result):
config_set_meta = result['config_set_meta']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(config_set=config_set_meta))
return response
def delete_config_set(self, response, result):
config_set_meta = result['config_set_meta']
response.status = 200
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(config_set=config_set_meta))
return response
def get_config_set(self, response, result):
config_set_meta = result['config_set_meta']
response.status = 200
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(config_set=config_set_meta))
return response
def cluster_config_set_update(self, response, result):
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(result)
return response
def cluster_config_set_progress(self, response, result):
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(config_set=result))
return response
def create_resource():
"""config_sets resource factory method"""
deserializer = Config_setDeserializer()
serializer = Config_setSerializer()
return wsgi.Resource(Controller(), deserializer, serializer)
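# Illustrative request-body sketch for cluster_config_set_update (all values
# below are placeholders, not defaults defined by Daisy): 'cluster' is a
# cluster UUID and 'role' is a string that eval()s to a list of role names.
EXAMPLE_CLUSTER_CONFIG_SET_UPDATE_BODY = {
    'cluster': '11111111-2222-3333-4444-555555555555',
    'role': "['CONTROLLER_HA']",
}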


@ -0,0 +1,301 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/configs endpoint for Daisy v1 API
"""
from oslo_config import cfg
from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPConflict
from webob.exc import HTTPForbidden
from webob.exc import HTTPNotFound
from webob import Response
from daisy.api import policy
import daisy.api.v1
from daisy.api.v1 import controller
from daisy.api.v1 import filters
from daisy.common import exception
from daisy.common import property_utils
from daisy.common import utils
from daisy.common import wsgi
from daisy import i18n
from daisy import notifier
import daisy.registry.client.v1.api as registry
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS
SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS
ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE
CONF = cfg.CONF
CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format')
CONF.import_opt('container_formats', 'daisy.common.config',
group='image_format')
CONF.import_opt('image_property_quota', 'daisy.common.config')
class Controller(controller.BaseController):
"""
WSGI controller for configs resource in Daisy v1 API
The configs resource API is a RESTful web service for config data. The API
is as follows::
GET /configs -- Returns a set of brief metadata about configs
GET /configs/detail -- Returns a set of detailed metadata about
configs
HEAD /configs/<ID> -- Return metadata about a config with id <ID>
GET /configs/<ID> -- Return config data for config with id <ID>
POST /configs -- Store config data and return metadata about the
newly-stored config
PUT /configs/<ID> -- Update config metadata and/or upload config
data for a previously-reserved config
DELETE /configs/<ID> -- Delete the config with id <ID>
"""
def __init__(self):
self.notifier = notifier.Notifier()
registry.configure_registry_client()
self.policy = policy.Enforcer()
if property_utils.is_property_protection_enabled():
self.prop_enforcer = property_utils.PropertyRules(self.policy)
else:
self.prop_enforcer = None
def _enforce(self, req, action, target=None):
"""Authorize an action against our policies"""
if target is None:
target = {}
try:
self.policy.enforce(req.context, action, target)
except exception.Forbidden:
raise HTTPForbidden()
def _get_filters(self, req):
"""
Return a dictionary of query param filters from the request
:param req: the Request object coming from the wsgi layer
:retval a dict of key/value filters
"""
query_filters = {}
for param in req.params:
if param in SUPPORTED_FILTERS:
query_filters[param] = req.params.get(param)
if not filters.validate(param, query_filters[param]):
raise HTTPBadRequest(_('Bad value passed to filter '
'%(filter)s got %(val)s')
% {'filter': param,
'val': query_filters[param]})
return query_filters
def _get_query_params(self, req):
"""
Extracts necessary query params from request.
:param req: the WSGI Request object
:retval dict of parameters that can be used by registry client
"""
params = {'filters': self._get_filters(req)}
for PARAM in SUPPORTED_PARAMS:
if PARAM in req.params:
params[PARAM] = req.params.get(PARAM)
return params
def _raise_404_if_config_set_delete(self, req, config_set_id):
config_set = self.get_config_set_meta_or_404(req, config_set_id)
if config_set['deleted']:
msg = _("config_set with identifier %s has been deleted.") % config_set_id
raise HTTPNotFound(msg)
def _raise_404_if_config_file_delete(self, req, config_file_id):
config_file = self.get_config_file_meta_or_404(req, config_file_id)
if config_file['deleted']:
msg = _("config_file with identifier %s has been deleted.") % config_file_id
raise HTTPNotFound(msg)
def _raise_404_if_role_exist(self,req,config_meta):
role_id=""
try:
roles = registry.get_roles_detail(req.context)
for role in roles:
if role['cluster_id'] == config_meta['cluster'] and role['name'] == config_meta['role']:
role_id=role['id']
break
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return role_id
def _raise_404_if_cluster_deleted(self, req, cluster_id):
cluster = self.get_cluster_meta_or_404(req, cluster_id)
if cluster['deleted']:
msg = _("cluster with identifier %s has been deleted.") % cluster_id
raise HTTPNotFound(msg)
@utils.mutating
def add_config(self, req, config_meta):
"""
Adds a new config to Daisy.
:param req: The WSGI/Webob Request object
:param config_meta: Mapping of metadata about config
:raises HTTPBadRequest if x-config-name is missing
"""
self._enforce(req, 'add_config')
if 'cluster' in config_meta:
orig_cluster = str(config_meta['cluster'])
self._raise_404_if_cluster_deleted(req, orig_cluster)
if 'role' in config_meta:
role_id=self._raise_404_if_role_exist(req,config_meta)
if not role_id:
msg = "the role name is not exist"
LOG.error(msg)
raise HTTPNotFound(msg)
config_meta = registry.config_interface_metadata(req.context, config_meta)
return config_meta
@utils.mutating
def delete_config(self, req, config_meta):
"""
Deletes a config from Daisy.
:param req: The WSGI/Webob Request object
:param config_meta: Mapping whose 'config' field eval()s to a list of
config ids to delete
"""
self._enforce(req, 'delete_config')
try:
for id in eval(config_meta['config']):
registry.delete_config_metadata(req.context, id)
except exception.NotFound as e:
msg = (_("Failed to find config to delete: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPNotFound(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden to delete config: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except exception.InUseByStore as e:
msg = (_("config %(id)s could not be deleted because it is in use: "
"%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)})
LOG.warn(msg)
raise HTTPConflict(explanation=msg,
request=req,
content_type="text/plain")
else:
#self.notifier.info('config.delete', config)
return Response(body='', status=200)
@utils.mutating
def get_config(self, req, id):
"""
Returns metadata about a config in the HTTP headers of the
response object
:param req: The WSGI/Webob Request object
:param id: The opaque config identifier
:raises HTTPNotFound if config metadata is not available to user
"""
self._enforce(req, 'get_config')
config_meta = self.get_config_meta_or_404(req, id)
return {'config_meta': config_meta}
def detail(self, req):
"""
Returns detailed information for all available configs
:param req: The WSGI/Webob Request object
:retval The response body is a mapping of the following form::
{'configs': [
{'id': <ID>,
'name': <NAME>,
'description': <DESCRIPTION>,
'created_at': <TIMESTAMP>,
'updated_at': <TIMESTAMP>,
'deleted_at': <TIMESTAMP>|<NONE>,}, ...
]}
"""
self._enforce(req, 'get_configs')
params = self._get_query_params(req)
try:
configs = registry.get_configs_detail(req.context, **params)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return dict(configs=configs)
class ConfigDeserializer(wsgi.JSONRequestDeserializer):
"""Handles deserialization of specific controller method requests."""
def _deserialize(self, request):
result = {}
result["config_meta"] = utils.get_config_meta(request)
return result
def add_config(self, request):
return self._deserialize(request)
def delete_config(self, request):
return self._deserialize(request)
class ConfigSerializer(wsgi.JSONResponseSerializer):
"""Handles serialization of specific controller method responses."""
def __init__(self):
self.notifier = notifier.Notifier()
def add_config(self, response, result):
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(result)
return response
def delete_config(self, response, result):
config_meta = result['config_meta']
response.status = 200
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(config=config_meta))
return response
def get_config(self, response, result):
config_meta = result['config_meta']
response.status = 200
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(config=config_meta))
return response
def create_resource():
"""configs resource factory method"""
deserializer = ConfigDeserializer()
serializer = ConfigSerializer()
return wsgi.Resource(Controller(), deserializer, serializer)
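# Illustrative body sketch for delete_config (placeholder ids): the 'config'
# field is a string that eval()s to a list of config ids, e.g.
#
#     {'config': "['<config-id-1>', '<config-id-2>']"}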


@ -0,0 +1,369 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glance_store as store
from oslo_log import log as logging
import webob.exc
from daisy.common import exception
from daisy import i18n
import daisy.registry.client.v1.api as registry
LOG = logging.getLogger(__name__)
_ = i18n._
class BaseController(object):
def get_image_meta_or_404(self, request, image_id):
"""
Grabs the image metadata for an image with a supplied
identifier or raises an HTTPNotFound (404) response
:param request: The WSGI/Webob Request object
:param image_id: The opaque image identifier
:raises HTTPNotFound if image does not exist
"""
context = request.context
try:
return registry.get_image_metadata(context, image_id)
except exception.NotFound:
msg = "Image with identifier %s not found" % image_id
LOG.debug(msg)
raise webob.exc.HTTPNotFound(
msg, request=request, content_type='text/plain')
except exception.Forbidden:
msg = "Forbidden image access"
LOG.debug(msg)
raise webob.exc.HTTPForbidden(msg,
request=request,
content_type='text/plain')
def get_host_meta_or_404(self, request, host_id):
"""
Grabs the host metadata for a host with a supplied
identifier or raises an HTTPNotFound (404) response
:param request: The WSGI/Webob Request object
:param host_id: The opaque host identifier
:raises HTTPNotFound if host does not exist
"""
context = request.context
try:
return registry.get_host_metadata(context, host_id)
except exception.NotFound:
msg = "Host with identifier %s not found" % host_id
LOG.debug(msg)
raise webob.exc.HTTPNotFound(
msg, request=request, content_type='text/plain')
except exception.Forbidden:
msg = "Forbidden host access"
LOG.debug(msg)
raise webob.exc.HTTPForbidden(msg,
request=request,
content_type='text/plain')
def get_cluster_meta_or_404(self, request, cluster_id):
"""
Grabs the cluster metadata for a cluster with a supplied
identifier or raises an HTTPNotFound (404) response
:param request: The WSGI/Webob Request object
:param cluster_id: The opaque cluster identifier
:raises HTTPNotFound if cluster does not exist
"""
context = request.context
try:
return registry.get_cluster_metadata(context, cluster_id)
except exception.NotFound:
msg = "Cluster with identifier %s not found" % cluster_id
LOG.debug(msg)
raise webob.exc.HTTPNotFound(
msg, request=request, content_type='text/plain')
except exception.Forbidden:
msg = "Forbidden host access"
LOG.debug(msg)
raise webob.exc.HTTPForbidden(msg,
request=request,
content_type='text/plain')
def get_component_meta_or_404(self, request, component_id):
"""
Grabs the component metadata for a component with a supplied
identifier or raises an HTTPNotFound (404) response
:param request: The WSGI/Webob Request object
:param component_id: The opaque component identifier
:raises HTTPNotFound if component does not exist
"""
context = request.context
try:
return registry.get_component_metadata(context, component_id)
except exception.NotFound:
msg = "Component with identifier %s not found" % component_id
LOG.debug(msg)
raise webob.exc.HTTPNotFound(
msg, request=request, content_type='text/plain')
except exception.Forbidden:
msg = "Forbidden host access"
LOG.debug(msg)
raise webob.exc.HTTPForbidden(msg,
request=request,
content_type='text/plain')
def get_service_meta_or_404(self, request, service_id):
"""
Grabs the service metadata for a service with a supplied
identifier or raises an HTTPNotFound (404) response
:param request: The WSGI/Webob Request object
:param service_id: The opaque service identifier
:raises HTTPNotFound if service does not exist
"""
context = request.context
try:
return registry.get_service_metadata(context, service_id)
except exception.NotFound:
msg = "Service with identifier %s not found" % service_id
LOG.debug(msg)
raise webob.exc.HTTPNotFound(
msg, request=request, content_type='text/plain')
except exception.Forbidden:
msg = "Forbidden host access"
LOG.debug(msg)
raise webob.exc.HTTPForbidden(msg,
request=request,
content_type='text/plain')
def get_role_meta_or_404(self, request, role_id):
"""
Grabs the role metadata for a role with a supplied
identifier or raises an HTTPNotFound (404) response
:param request: The WSGI/Webob Request object
:param role_id: The opaque role identifier
:raises HTTPNotFound if role does not exist
"""
context = request.context
try:
return registry.get_role_metadata(context, role_id)
except exception.NotFound:
msg = "Role with identifier %s not found" % role_id
LOG.debug(msg)
raise webob.exc.HTTPNotFound(
msg, request=request, content_type='text/plain')
except exception.Forbidden:
msg = "Forbidden host access"
LOG.debug(msg)
raise webob.exc.HTTPForbidden(msg,
request=request,
content_type='text/plain')
def get_network_meta_or_404(self, request, network_id):
"""
Grabs the network metadata for a network with a supplied
identifier or raises an HTTPNotFound (404) response
:param request: The WSGI/Webob Request object
:param network_id: The opaque network identifier
:raises HTTPNotFound if network does not exist
"""
context = request.context
try:
return registry.get_network_metadata(context, network_id)
except exception.NotFound:
msg = "Network with identifier %s not found" % network_id
LOG.debug(msg)
raise webob.exc.HTTPNotFound(
msg, request=request, content_type='text/plain')
except exception.Forbidden:
msg = "Forbidden network access"
LOG.debug(msg)
raise webob.exc.HTTPForbidden(msg,
request=request,
content_type='text/plain')
def get_active_image_meta_or_error(self, request, image_id):
"""
Same as get_image_meta_or_404 except that it will raise a 403 if the
image is deactivated or 404 if the image is otherwise not 'active'.
"""
image = self.get_image_meta_or_404(request, image_id)
if image['status'] == 'deactivated':
msg = "Image %s is deactivated" % image_id
LOG.debug(msg)
msg = _("Image %s is deactivated") % image_id
raise webob.exc.HTTPForbidden(
msg, request=request, content_type='text/plain')
if image['status'] != 'active':
msg = "Image %s is not active" % image_id
LOG.debug(msg)
msg = _("Image %s is not active") % image_id
raise webob.exc.HTTPNotFound(
msg, request=request, content_type='text/plain')
return image
def update_store_acls(self, req, image_id, location_uri, public=False):
if location_uri:
try:
read_tenants = []
write_tenants = []
members = registry.get_image_members(req.context, image_id)
if members:
for member in members:
if member['can_share']:
write_tenants.append(member['member_id'])
else:
read_tenants.append(member['member_id'])
store.set_acls(location_uri, public=public,
read_tenants=read_tenants,
write_tenants=write_tenants,
context=req.context)
except store.UnknownScheme:
msg = _("Store for image_id not found: %s") % image_id
raise webob.exc.HTTPBadRequest(explanation=msg,
request=req,
content_type='text/plain')
def get_config_file_meta_or_404(self, request, config_file_id):
"""
Grabs the config_file metadata for a config_file with a supplied
identifier or raises an HTTPNotFound (404) response
:param request: The WSGI/Webob Request object
:param config_file_id: The opaque config_file identifier
:raises HTTPNotFound if config_file does not exist
"""
context = request.context
try:
return registry.get_config_file_metadata(context, config_file_id)
except exception.NotFound:
msg = "config_file with identifier %s not found" % config_file_id
LOG.debug(msg)
raise webob.exc.HTTPNotFound(
msg, request=request, content_type='text/plain')
except exception.Forbidden:
msg = "Forbidden config_filke access"
LOG.debug(msg)
raise webob.exc.HTTPForbidden(msg,
request=request,
content_type='text/plain')
def get_config_set_meta_or_404(self, request, config_set_id):
"""
Grabs the config_set metadata for a config_set with a supplied
identifier or raises an HTTPNotFound (404) response
:param request: The WSGI/Webob Request object
:param config_set_id: The opaque config_set identifier
:raises HTTPNotFound if config_set does not exist
"""
context = request.context
try:
return registry.get_config_set_metadata(context, config_set_id)
except exception.NotFound:
msg = "config_set with identifier %s not found" % config_set_id
LOG.debug(msg)
raise webob.exc.HTTPNotFound(
msg, request=request, content_type='text/plain')
except exception.Forbidden:
msg = "Forbidden config_set access"
LOG.debug(msg)
raise webob.exc.HTTPForbidden(msg,
request=request,
content_type='text/plain')
def get_config_meta_or_404(self, request, config_id):
"""
Grabs the config metadata for a config with a supplied
identifier or raises an HTTPNotFound (404) response
:param request: The WSGI/Webob Request object
:param config_id: The opaque config identifier
:raises HTTPNotFound if config does not exist
"""
context = request.context
try:
return registry.get_config_metadata(context, config_id)
except exception.NotFound:
msg = "config with identifier %s not found" % config_id
LOG.debug(msg)
raise webob.exc.HTTPNotFound(
msg, request=request, content_type='text/plain')
except exception.Forbidden:
msg = "Forbidden config access"
LOG.debug(msg)
raise webob.exc.HTTPForbidden(msg,
request=request,
content_type='text/plain')
def get_service_disk_meta_or_404(self, request, id):
"""
Grabs the service_disk metadata for a service_disk with a supplied
identifier or raises an HTTPNotFound (404) response
:param request: The WSGI/Webob Request object
:param id: The opaque service_disk identifier
:raises HTTPNotFound if service_disk does not exist
"""
context = request.context
try:
return registry.get_service_disk_detail_metadata(context, id)
except exception.NotFound:
msg = "config with identifier %s not found" % id
LOG.debug(msg)
raise webob.exc.HTTPNotFound(
msg, request=request, content_type='text/plain')
except exception.Forbidden:
msg = "Forbidden config access"
LOG.debug(msg)
raise webob.exc.HTTPForbidden(msg,
request=request,
content_type='text/plain')
def get_cinder_volume_meta_or_404(self, request, id):
"""
Grabs the cinder_volume metadata for a cinder_volume with a supplied
identifier or raises an HTTPNotFound (404) response
:param request: The WSGI/Webob Request object
:param id: The opaque cinder_volume identifier
:raises HTTPNotFound if cinder_volume does not exist
"""
context = request.context
try:
return registry.get_cinder_volume_detail_metadata(context, id)
except exception.NotFound:
msg = "config with identifier %s not found" % id
LOG.debug(msg)
raise webob.exc.HTTPNotFound(
msg, request=request, content_type='text/plain')
except exception.Forbidden:
msg = "Forbidden config access"
LOG.debug(msg)
raise webob.exc.HTTPForbidden(msg,
request=request,
content_type='text/plain')
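# Usage sketch (assumed, mirroring the v1 controllers in this tree): API
# controllers subclass BaseController and call the *_meta_or_404 helpers,
# e.g. self.get_role_meta_or_404(req, role_id), letting the raised
# webob.exc responses propagate to the WSGI layer.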


@ -0,0 +1,668 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/service_disk and /cinder_volume endpoints for Daisy v1 API
"""
import time
import traceback
import ast
import webob.exc
from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPConflict
from webob.exc import HTTPForbidden
from webob.exc import HTTPNotFound
from webob.exc import HTTPServerError
from webob import Response
from threading import Thread
from daisy import i18n
from daisy import notifier
from daisy.api import policy
import daisy.api.v1
from daisy.common import exception
from daisy.common import property_utils
from daisy.common import utils
from daisy.common import wsgi
import daisy.registry.client.v1.api as registry
from daisy.api.v1 import controller
from daisy.api.v1 import filters
try:
import simplejson as json
except ImportError:
import json
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS
SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS
ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE
SERVICE_DISK_SERVICE = ('db', 'glance', 'dbbackup', 'mongodb', 'nova')
DISK_LOCATION = ('local', 'share')
CINDER_VOLUME_BACKEND_PARAMS = ('management_ips', 'data_ips', 'pools',
'volume_driver', 'volume_type',
'role_id', 'user_name', 'user_pwd')
CINDER_VOLUME_BACKEND_DRIVER = ['KS3200_IPSAN', 'KS3200_FCSAN',
'FUJISTU_ETERNUS']
class Controller(controller.BaseController):
"""
WSGI controller for service_disk and cinder_volume resources in Daisy v1 API
The methods below add, update, delete, list and show per-role disk
configuration: service_disk entries describe where a service (db, glance,
mongodb, ...) keeps its data, and cinder_volume entries describe cinder
backend disk arrays.
"""
def __init__(self):
self.notifier = notifier.Notifier()
registry.configure_registry_client()
self.policy = policy.Enforcer()
if property_utils.is_property_protection_enabled():
self.prop_enforcer = property_utils.PropertyRules(self.policy)
else:
self.prop_enforcer = None
def _enforce(self, req, action, target=None):
"""Authorize an action against our policies"""
if target is None:
target = {}
try:
self.policy.enforce(req.context, action, target)
except exception.Forbidden:
raise HTTPForbidden()
def _get_filters(self, req):
"""
Return a dictionary of query param filters from the request
:param req: the Request object coming from the wsgi layer
:retval a dict of key/value filters
"""
query_filters = {}
for param in req.params:
if param in SUPPORTED_FILTERS:
query_filters[param] = req.params.get(param)
if not filters.validate(param, query_filters[param]):
raise HTTPBadRequest(_('Bad value passed to filter '
'%(filter)s got %(val)s')
% {'filter': param,
'val': query_filters[param]})
return query_filters
def _get_query_params(self, req):
"""
Extracts necessary query params from request.
:param req: the WSGI Request object
:retval dict of parameters that can be used by registry client
"""
params = {'filters': self._get_filters(req)}
for PARAM in SUPPORTED_PARAMS:
if PARAM in req.params:
params[PARAM] = req.params.get(PARAM)
return params
def _raise_404_if_role_deleted(self, req, role_id):
role = self.get_role_meta_or_404(req, role_id)
if role is None or role['deleted']:
msg = _("role with identifier %s has been deleted.") % role_id
raise HTTPNotFound(msg)
if role['type'] == 'template':
msg = "role type of %s is 'template'" % role_id
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
def _raise_404_if_service_disk_deleted(self, req, service_disk_id):
service_disk = self.get_service_disk_meta_or_404(req, service_disk_id)
if service_disk is None or service_disk['deleted']:
msg = _("service_disk with identifier %s has been deleted.") % service_disk_id
raise HTTPNotFound(msg)
def _default_value_set(self, disk_meta):
if not disk_meta.get('disk_location'):
disk_meta['disk_location'] = 'local'
if 'lun' not in disk_meta:
disk_meta['lun'] = 0
if 'size' not in disk_meta:
disk_meta['size'] = -1
def _unique_service_in_role(self, req, disk_meta):
params = {'filters': {'role_id': disk_meta['role_id']}}
service_disks = registry.list_service_disk_metadata(req.context, **params)
for service_disk in service_disks:
if service_disk['service'] == disk_meta['service']:
msg = "disk service %s has existed in role %s" %(disk_meta['service'], disk_meta['role_id'])
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
def _service_disk_add_meta_valid(self, req, disk_meta):
if 'role_id' not in disk_meta:
msg = "'role_id' must be given"
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
else:
self._raise_404_if_role_deleted(req,disk_meta['role_id'])
if 'service' not in disk_meta:
msg = "'service' must be given"
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
else:
if disk_meta['service'] not in SERVICE_DISK_SERVICE:
msg = "service '%s' is not supported" % disk_meta['service']
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
if disk_meta['disk_location'] not in DISK_LOCATION:
msg = "disk_location %s is not supported" % disk_meta['disk_location']
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
if disk_meta['disk_location'] == 'share' and 'data_ips' not in disk_meta:
msg = "'data_ips' must be given when disk_location is share"
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
if disk_meta['lun'] < 0:
msg = "'lun' should not be less than 0"
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
disk_meta['size'] = ast.literal_eval(str(disk_meta['size']))
if not isinstance(disk_meta['size'], int):
msg = "'size' is not integer"
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
if disk_meta['size'] < -1:
msg = "'size' is invalid"
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
self._unique_service_in_role(req, disk_meta)
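# Example (placeholder values) of a disk_meta accepted by the checks above:
#     {'role_id': '<role-uuid>', 'service': 'db', 'disk_location': 'share',
#      'data_ips': '10.0.0.10', 'lun': 1, 'size': 100}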
def _service_disk_update_meta_valid(self, req, id, disk_meta):
orig_disk_meta = self.get_service_disk_meta_or_404(req, id)
if 'role_id' in disk_meta:
self._raise_404_if_role_deleted(req,disk_meta['role_id'])
if 'service' in disk_meta:
if disk_meta['service'] not in SERVICE_DISK_SERVICE:
msg = "service '%s' is not supported" % disk_meta['service']
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
if 'disk_location' in disk_meta:
if disk_meta['disk_location'] not in DISK_LOCATION:
msg = "disk_location '%s' is not supported" % disk_meta['disk_location']
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
if (disk_meta['disk_location'] == 'share' and
'data_ips' not in disk_meta and
not orig_disk_meta['data_ips']):
msg = "'data_ips' must be given when disk_location is share"
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
if 'size' in disk_meta:
disk_meta['size'] = ast.literal_eval(str(disk_meta['size']))
if not isinstance(disk_meta['size'], int):
msg = "'size' is not integer"
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
if disk_meta['size'] < -1:
msg = "'size' is invalid"
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
@utils.mutating
def service_disk_add(self, req, disk_meta):
"""
Adds a new service_disk to Daisy.
:param req: The WSGI/Webob Request object
:param disk_meta: Mapping of metadata about the service_disk
"""
self._enforce(req, 'service_disk_add')
self._default_value_set(disk_meta)
self._service_disk_add_meta_valid(req, disk_meta)
service_disk_meta = registry.add_service_disk_metadata(req.context, disk_meta)
return {'disk_meta': service_disk_meta}
@utils.mutating
def service_disk_delete(self, req, id):
"""
Deletes a service_disk from Daisy.
:param req: The WSGI/Webob Request object
:param id: The opaque service_disk identifier
:raises HTTPNotFound if the service_disk does not exist
"""
self._enforce(req, 'delete_service_disk')
try:
registry.delete_service_disk_metadata(req.context, id)
except exception.NotFound as e:
msg = (_("Failed to find service_disk to delete: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPNotFound(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden to delete service_disk: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except exception.InUseByStore as e:
msg = (_("service_disk %(id)s could not be deleted because it is in use: "
"%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)})
LOG.warn(msg)
raise HTTPConflict(explanation=msg,
request=req,
content_type="text/plain")
else:
return Response(body='', status=200)
@utils.mutating
def service_disk_update(self, req, id, disk_meta):
self._enforce(req, 'service_disk_update')
self._service_disk_update_meta_valid(req, id, disk_meta)
try:
service_disk_meta = registry.update_service_disk_metadata(req.context,
id,
disk_meta)
except exception.Invalid as e:
msg = (_("Failed to update role metadata. Got error: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
except exception.NotFound as e:
msg = (_("Failed to find role to update: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPNotFound(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden to update role: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except (exception.Conflict, exception.Duplicate) as e:
LOG.warn(utils.exception_to_str(e))
raise HTTPConflict(body=_('service_disk operation conflicts'),
request=req,
content_type='text/plain')
else:
self.notifier.info('service_disk.update', service_disk_meta)
return {'disk_meta': service_disk_meta}
@utils.mutating
def service_disk_detail(self, req, id):
"""
Returns metadata about a service_disk in the HTTP headers of the
response object
:param req: The WSGI/Webob Request object
:param id: The opaque service_disk identifier
:raises HTTPNotFound if service_disk metadata is not available to user
"""
self._enforce(req, 'service_disk_detail')
service_disk_meta = self.get_service_disk_meta_or_404(req, id)
return {'disk_meta': service_disk_meta}
def service_disk_list(self, req):
self._enforce(req, 'service_disk_list')
params = self._get_query_params(req)
filters = params.get('filters', {})
if 'role_id' in filters:
role_id = filters['role_id']
self._raise_404_if_role_deleted(req, role_id)
try:
service_disks = registry.list_service_disk_metadata(req.context, **params)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return dict(disk_meta=service_disks)
def _cinder_volume_list(self, req, params):
try:
cinder_volumes = registry.list_cinder_volume_metadata(req.context, **params)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return cinder_volumes
def _is_cinder_volume_repeat(self, req, array_disk_info, update_id = None):
cinder_volume_id = None
params = {'filters': {}}
if update_id:
cinder_volume_meta = self.get_cinder_volume_meta_or_404(req, update_id)
new_management_ips = array_disk_info.get('management_ips', cinder_volume_meta['management_ips']).split(",")
new_pools = array_disk_info.get('pools', cinder_volume_meta['pools']).split(",")
else:
new_management_ips = array_disk_info['management_ips'].split(",")
new_pools = array_disk_info['pools'].split(",")
org_cinder_volumes = self._cinder_volume_list(req, params)
for cinder_volume in org_cinder_volumes:
if (set(cinder_volume['management_ips'].split(",")) == set(new_management_ips) and
set(cinder_volume['pools'].split(",")) == set(new_pools)):
if cinder_volume['id'] != update_id:
msg = 'cinder_volume array disks conflict with cinder_volume %s' % cinder_volume['id']
raise HTTPBadRequest(explanation=msg, request=req)
def _get_cinder_volume_backend_index(self, req, disk_array):
params = {'filters': {}}
cinder_volumes = self._cinder_volume_list(req, params)
index = 1
while True:
backend_index = "%s-%s" %(disk_array['volume_driver'], index)
flag = True
for cinder_volume in cinder_volumes:
if backend_index == cinder_volume['backend_index']:
index += 1
flag = False
break
if flag:
break
return backend_index
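# Example (hypothetical values): with volume_driver 'KS3200_IPSAN' and
# existing backend_index values 'KS3200_IPSAN-1' and 'KS3200_IPSAN-2',
# _get_cinder_volume_backend_index returns 'KS3200_IPSAN-3'.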
@utils.mutating
def cinder_volume_add(self, req, disk_meta):
"""
Adds cinder volume backend configuration for a role to Daisy.
:param req: The WSGI/Webob Request object
:param disk_meta: Mapping with 'role_id' and a 'disk_array' description
"""
self._enforce(req, 'cinder_volume_add')
if 'role_id' not in disk_meta:
msg = "'role_id' must be given"
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
else:
self._raise_404_if_role_deleted(req,disk_meta['role_id'])
disk_arrays = eval(disk_meta['disk_array'])
for disk_array in disk_arrays:
for key in disk_array.keys():
if (key not in CINDER_VOLUME_BACKEND_PARAMS and
key != 'data_ips'):
msg = "'%s' must be given for cinder volume config" % key
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
if disk_array['volume_driver'] not in CINDER_VOLUME_BACKEND_DRIVER:
msg = "volume_driver %s is not supported" % disk_array['volume_driver']
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
if (disk_array['volume_driver'] == 'FUJISTU_ETERNUS' and
('data_ips' not in disk_array or
not disk_array['data_ips'])):
msg = "data_ips must be given when using FUJISTU Disk Array"
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
self._is_cinder_volume_repeat(req, disk_array)
disk_array['role_id'] = disk_meta['role_id']
disk_array['backend_index'] = self._get_cinder_volume_backend_index(req, disk_array)
cinder_volumes = registry.add_cinder_volume_metadata(req.context, disk_array)
return {'disk_meta': cinder_volumes}
@utils.mutating
def cinder_volume_delete(self, req, id):
"""
Deletes a cinder volume backend from Daisy.
:param req: The WSGI/Webob Request object
:param id: The opaque cinder_volume identifier
:raises HTTPNotFound if the cinder volume does not exist
"""
self._enforce(req, 'delete_cinder_volume')
try:
registry.delete_cinder_volume_metadata(req.context, id)
except exception.NotFound as e:
msg = (_("Failed to find cinder volume to delete: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPNotFound(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden to delete cinder volume: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except exception.InUseByStore as e:
msg = (_("cindre volume %(id)s could not be deleted because it is in use: "
"%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)})
LOG.warn(msg)
raise HTTPConflict(explanation=msg,
request=req,
content_type="text/plain")
else:
return Response(body='', status=200)
def _is_data_ips_valid(self, req, update_id, update_meta):
origin_cinder_volume = self.get_cinder_volume_meta_or_404(req, update_id)
new_driver = update_meta.get('volume_driver',
origin_cinder_volume['volume_driver'])
if new_driver != 'FUJISTU_ETERNUS':
return
new_data_ips = update_meta.get('data_ips',
origin_cinder_volume['data_ips'])
if not new_data_ips:
msg = "data_ips must be given when using the FUJITSU disk array"
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
@utils.mutating
def cinder_volume_update(self, req, id, disk_meta):
for key in disk_meta.keys():
if key not in CINDER_VOLUME_BACKEND_PARAMS:
msg = "'%s' must be given for cinder volume config" % key
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
if 'role_id' in disk_meta:
self._raise_404_if_role_deleted(req,disk_meta['role_id'])
if ('volume_driver' in disk_meta and
disk_meta['volume_driver'] not in CINDER_VOLUME_BACKEND_DRIVER):
msg = "volume_driver %s is not supported" % disk_meta['volume_driver']
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
self._is_cinder_volume_repeat(req, disk_meta, id)
self._is_data_ips_valid(req, id, disk_meta)
try:
cinder_volume_meta = registry.update_cinder_volume_metadata(req.context,
id,
disk_meta)
except exception.Invalid as e:
msg = (_("Failed to update cinder_volume metadata. Got error: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
except exception.NotFound as e:
msg = (_("Failed to find cinder_volume to update: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPNotFound(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden to update cinder_volume: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except (exception.Conflict, exception.Duplicate) as e:
LOG.warn(utils.exception_to_str(e))
raise HTTPConflict(body=_('cinder_volume operation conflicts'),
request=req,
content_type='text/plain')
else:
self.notifier.info('cinder_volume.update', cinder_volume_meta)
return {'disk_meta': cinder_volume_meta}
@utils.mutating
def cinder_volume_detail(self, req, id):
"""
        Returns metadata about a cinder volume in the HTTP response body
        :param req: The WSGI/Webob Request object
        :param id: The opaque cinder volume identifier
        :raises HTTPNotFound if cinder volume metadata is not available to user
"""
self._enforce(req, 'cinder_volume_detail')
cinder_volume_meta = self.get_cinder_volume_meta_or_404(req, id)
return {'disk_meta': cinder_volume_meta}
def cinder_volume_list(self, req):
self._enforce(req, 'cinder_volume_list')
params = self._get_query_params(req)
        filters = params.get('filters', {})
        if 'role_id' in filters:
            role_id = filters['role_id']
self._raise_404_if_role_deleted(req, role_id)
cinder_volumes = self._cinder_volume_list(req, params)
return dict(disk_meta=cinder_volumes)
class DiskArrayDeserializer(wsgi.JSONRequestDeserializer):
"""Handles deserialization of specific controller method requests."""
def _deserialize(self, request):
result = {}
result["disk_meta"] = utils.get_dict_meta(request)
return result
def service_disk_add(self, request):
return self._deserialize(request)
def service_disk_update(self, request):
return self._deserialize(request)
def cinder_volume_add(self, request):
return self._deserialize(request)
def cinder_volume_update(self, request):
return self._deserialize(request)
class DiskArraySerializer(wsgi.JSONResponseSerializer):
"""Handles serialization of specific controller method responses."""
def __init__(self):
self.notifier = notifier.Notifier()
def service_disk_add(self, response, result):
disk_meta = result['disk_meta']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(result)
return response
def service_disk_update(self, response, result):
disk_meta = result['disk_meta']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(result)
return response
def cinder_volume_add(self, response, result):
disk_meta = result['disk_meta']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(result)
return response
def cinder_volume_update(self, response, result):
disk_meta = result['disk_meta']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(result)
return response
def create_resource():
"""Image members resource factory method"""
deserializer = DiskArrayDeserializer()
serializer = DiskArraySerializer()
return wsgi.Resource(Controller(), deserializer, serializer)


@ -0,0 +1,40 @@
# Copyright 2012, Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def validate(filter, value):
return FILTER_FUNCTIONS.get(filter, lambda v: True)(value)
def validate_int_in_range(min=0, max=None):
def _validator(v):
try:
if max is None:
return min <= int(v)
return min <= int(v) <= max
except ValueError:
return False
return _validator
def validate_boolean(v):
return v.lower() in ('none', 'true', 'false', '1', '0')
FILTER_FUNCTIONS = {'size_max': validate_int_in_range(), # build validator
'size_min': validate_int_in_range(), # build validator
'min_ram': validate_int_in_range(), # build validator
'protected': validate_boolean,
'is_public': validate_boolean, }
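# A minimal usage sketch (illustrative values, not taken from the source):
# validate() dispatches on the filter name, and unknown filters fall through
# to the always-true default, so they are accepted without validation.
#
#   validate('size_max', '1024')     # True: integer within [0, +inf)
#   validate('size_max', '-3')       # False: below the default minimum of 0
#   validate('protected', 'TRUE')    # True: boolean match is case-insensitive
#   validate('no_such_filter', '?')  # True: unknown filters pass through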


@ -0,0 +1,569 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/host_Templates endpoint for Daisy v1 API
"""
from oslo_config import cfg
from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPConflict
from webob.exc import HTTPForbidden
from webob.exc import HTTPNotFound
from webob import Response
import copy
from daisy.api import policy
import daisy.api.v1
from daisy.api.v1 import controller
from daisy.api.v1 import filters
from daisy.common import exception
from daisy.common import property_utils
from daisy.common import utils
from daisy.common import wsgi
from daisy import i18n
from daisy import notifier
import daisy.registry.client.v1.api as registry
from daisy.registry.api.v1 import template
import daisy.api.backends.tecs.common as tecs_cmn
import daisy.api.backends.common as daisy_cmn
try:
import simplejson as json
except ImportError:
import json
daisy_tecs_path = tecs_cmn.daisy_tecs_path
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
SUPPORTED_PARAMS = template.SUPPORTED_PARAMS
SUPPORTED_FILTERS = template.SUPPORTED_FILTERS
ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE
CONF = cfg.CONF
CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format')
CONF.import_opt('container_formats', 'daisy.common.config',
group='image_format')
CONF.import_opt('image_property_quota', 'daisy.common.config')
class Controller(controller.BaseController):
"""
WSGI controller for Templates resource in Daisy v1 API
    The HostTemplates resource API is a RESTful web service for Template data. The API
is as follows::
GET /HostTemplates -- Returns a set of brief metadata about Templates
GET /HostTemplates/detail -- Returns a set of detailed metadata about
HostTemplates
HEAD /HostTemplates/<ID> -- Return metadata about an Template with id <ID>
GET /HostTemplates/<ID> -- Return Template data for Template with id <ID>
POST /HostTemplates -- Store Template data and return metadata about the
newly-stored Template
PUT /HostTemplates/<ID> -- Update Template metadata and/or upload Template
data for a previously-reserved Template
DELETE /HostTemplates/<ID> -- Delete the Template with id <ID>
"""
def __init__(self):
self.notifier = notifier.Notifier()
registry.configure_registry_client()
self.policy = policy.Enforcer()
if property_utils.is_property_protection_enabled():
self.prop_enforcer = property_utils.PropertyRules(self.policy)
else:
self.prop_enforcer = None
def _enforce(self, req, action, target=None):
"""Authorize an action against our policies"""
if target is None:
target = {}
try:
self.policy.enforce(req.context, action, target)
except exception.Forbidden:
raise HTTPForbidden()
def _get_filters(self, req):
"""
Return a dictionary of query param filters from the request
:param req: the Request object coming from the wsgi layer
:retval a dict of key/value filters
"""
query_filters = {}
for param in req.params:
if param in SUPPORTED_FILTERS:
query_filters[param] = req.params.get(param)
if not filters.validate(param, query_filters[param]):
raise HTTPBadRequest(_('Bad value passed to filter '
'%(filter)s got %(val)s')
% {'filter': param,
'val': query_filters[param]})
return query_filters
def _get_query_params(self, req):
"""
Extracts necessary query params from request.
:param req: the WSGI Request object
:retval dict of parameters that can be used by registry client
"""
params = {'filters': self._get_filters(req)}
for PARAM in SUPPORTED_PARAMS:
if PARAM in req.params:
params[PARAM] = req.params.get(PARAM)
return params
def _raise_404_if_cluster_deleted(self, req, cluster_id):
cluster = self.get_cluster_meta_or_404(req, cluster_id)
if cluster['deleted']:
msg = _("Cluster with identifier %s has been deleted.") % cluster_id
raise webob.exc.HTTPNotFound(msg)
@utils.mutating
def add_template(self, req, host_template):
"""
Adds a new cluster template to Daisy.
:param req: The WSGI/Webob Request object
:param image_meta: Mapping of metadata about Template
:raises HTTPBadRequest if x-Template-name is missing
"""
self._enforce(req, 'add_host_template')
        host_template = registry.add_host_template_metadata(req.context, host_template)
        return {'host_template': host_template}
@utils.mutating
def update_host_template(self, req, template_id, host_template):
"""
Updates an existing Template with the registry.
:param request: The WSGI/Webob Request object
:param id: The opaque image identifier
:retval Returns the updated image information as a mapping
"""
self._enforce(req, 'update_host_template')
#orig_Template_meta = self.get_Template_meta_or_404(req, id)
'''
if orig_Template_meta['deleted']:
msg = _("Forbidden to update deleted Template.")
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
'''
try:
host_template = registry.update_host_template_metadata(req.context,
template_id,
host_template)
except exception.Invalid as e:
msg = (_("Failed to update template metadata. Got error: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
except exception.NotFound as e:
msg = (_("Failed to find host_template to update: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPNotFound(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden to update host_template: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except (exception.Conflict, exception.Duplicate) as e:
LOG.warn(utils.exception_to_str(e))
raise HTTPConflict(body=_('host_template operation conflicts'),
request=req,
content_type='text/plain')
else:
self.notifier.info('host_template.update', host_template)
return {'host_template': host_template}
    def _filter_params(self, host_meta):
        # drop db-generated and runtime fields that must not go into a template
        for key in ('id', 'updated_at', 'deleted_at', 'created_at', 'deleted',
                    'memory', 'system', 'disks', 'os_status', 'status',
                    'messages', 'cpu', 'ipmi_addr'):
            host_meta.pop(key, None)
        if 'interfaces' in host_meta:
            for interface in host_meta['interfaces']:
                for key in ('id', 'updated_at', 'deleted_at', 'created_at',
                            'deleted', 'current_speed', 'max_speed',
                            'host_id', 'state'):
                    interface.pop(key, None)
                for assigned_network in interface['assigned_networks']:
                    if 'ip' in assigned_network:
                        assigned_network['ip'] = ""
        return host_meta
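    # Illustrative effect (hypothetical host record): a host dict such as
    # {'id': ..., 'name': 'host-1', 'memory': {...}, 'status': 'active',
    #  'interfaces': [...]} comes back with the runtime keys dropped and every
    # assigned network 'ip' blanked, so the template can later be replayed
    # onto different hardware.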
@utils.mutating
def get_host_template_detail(self, req, template_id):
"""
        Get the detail of an existing host template from the registry.
        :param req: The WSGI/Webob Request object
        :param template_id: The opaque template identifier
        :retval Returns the template information as a mapping
"""
self._enforce(req, 'get_host_template_detail')
try:
host_template = registry.host_template_detail_metadata(req.context, template_id)
return {'host_template': host_template}
except exception.NotFound as e:
msg = (_("Failed to find host template: %s") %
utils.exception_to_str(e))
LOG.error(msg)
raise HTTPNotFound(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden to get host template: %s") %
utils.exception_to_str(e))
LOG.error(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except exception.InUseByStore as e:
msg = (_("host template %(id)s could not be get because it is in use: "
"%(exc)s") % {"id": template_id, "exc": utils.exception_to_str(e)})
LOG.error(msg)
raise HTTPConflict(explanation=msg,
request=req,
content_type="text/plain")
@utils.mutating
def get_host_template_lists(self, req):
self._enforce(req, 'get_template_lists')
params = self._get_query_params(req)
template_meta = {}
try:
host_template_lists = registry.host_template_lists_metadata(req.context, **params)
if host_template_lists and host_template_lists[0]:
template_meta = json.loads(host_template_lists[0]['hosts'])
return {'host_template': template_meta}
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
@utils.mutating
def host_to_template(self, req, host_template):
"""
        Save a host's configuration as a host template.
:param req: The WSGI/Webob Request object
:raises HTTPBadRequest if x-Template-cluster is missing
"""
self._enforce(req, 'host_to_template')
if host_template.get('host_id', None):
origin_host_meta = self.get_host_meta_or_404(req, host_template['host_id'])
host_meta = self._filter_params(origin_host_meta)
if host_template.get('host_template_name', None) and host_template.get('cluster_name', None):
host_meta['name'] = host_template['host_template_name']
host_meta['description'] = host_template.get('description', None)
params = {'filters':{'cluster_name':host_template['cluster_name']}}
templates = registry.host_template_lists_metadata(req.context, **params)
if templates and templates[0]:
had_host_template = False
if templates[0]['hosts']:
templates[0]['hosts'] = json.loads(templates[0]['hosts'])
else:
templates[0]['hosts'] = []
for index in range(len(templates[0]['hosts'])):
if host_template['host_template_name'] == templates[0]['hosts'][index]['name']:
had_host_template = True
templates[0]['hosts'][index] = host_meta
break
if not had_host_template:
host_meta['name'] = host_template['host_template_name']
templates[0]['hosts'].append(host_meta)
templates[0]['hosts'] = json.dumps(templates[0]['hosts'])
host_template = registry.update_host_template_metadata(req.context,
templates[0]['id'],
templates[0])
else:
param = {"cluster_name": host_template['cluster_name'], "hosts":json.dumps([host_meta])}
host_template = registry.add_host_template_metadata(req.context, param)
return {'host_template': host_template}
@utils.mutating
def template_to_host(self, req, host_template):
if not host_template.get('cluster_name', None):
msg = "cluster name is null"
raise HTTPNotFound(explanation=msg)
params = {'filters':{'cluster_name':host_template['cluster_name']}}
templates = registry.host_template_lists_metadata(req.context, **params)
hosts_param = []
host_template_used = {}
if templates and templates[0]:
hosts_param = json.loads(templates[0]['hosts'])
for host in hosts_param:
if host['name'] == host_template['host_template_name']:
host_template_used = host
break
            if not host_template_used:
                msg = "host template %s not found" % host_template['host_template_name']
                raise HTTPNotFound(explanation=msg, request=req, content_type="text/plain")
if host_template.get('host_id', None):
self.get_host_meta_or_404(req, host_template['host_id'])
        else:
            msg = "host_id must be given"
            raise HTTPBadRequest(explanation=msg)
host_id = host_template['host_id']
params = {'filters':{'name': host_template['cluster_name']}}
clusters = registry.get_clusters_detail(req.context, **params)
if clusters and clusters[0]:
host_template_used['cluster'] = clusters[0]['id']
        if host_template_used.get('role', None):
            role_id_list = []
            host_role_list = []
            if 'cluster' in host_template_used:
                params = self._get_query_params(req)
                role_list = registry.get_roles_detail(req.context, **params)
                for role_name in role_list:
                    if role_name['cluster_id'] == host_template_used['cluster']:
                        host_role_list = list(host_template_used['role'])
                        if role_name['name'] in host_role_list:
                            role_id_list.append(role_name['id'])
                host_template_used['role'] = role_id_list
        # strip per-host identity fields before applying the template
        for key in ('name', 'dmi_uuid', 'ipmi_user', 'ipmi_passwd', 'ipmi_addr'):
            host_template_used.pop(key, None)
host_template_interfaces = host_template_used.get('interfaces', None)
if host_template_interfaces:
template_ether_interface = [interface for interface in host_template_interfaces if interface['type'] == "ether" ]
orig_host_meta = registry.get_host_metadata(req.context, host_id)
orig_host_interfaces = orig_host_meta.get('interfaces', None)
temp_orig_host_interfaces = [ interface for interface in orig_host_interfaces if interface['type'] == "ether" ]
if len(temp_orig_host_interfaces) != len(template_ether_interface):
msg = (_('host_id %s does not match the host_id host_template '
'%s.') % (host_id, host_template['host_template_name']))
raise HTTPBadRequest(explanation = msg)
interface_match_flag = 0
for host_template_interface in host_template_interfaces:
if host_template_interface['type'] == 'ether':
for orig_host_interface in orig_host_interfaces:
if orig_host_interface['pci'] == host_template_interface['pci']:
interface_match_flag += 1
host_template_interface['mac'] = orig_host_interface['mac']
                            host_template_interface.pop('ip', None)
if interface_match_flag != len(template_ether_interface):
msg = (_('host_id %s does not match the host '
'host_template %s.') % (host_id, host_template['host_template_name']))
raise HTTPBadRequest(explanation=msg)
host_template_used['interfaces'] = str(host_template_interfaces)
host_template = registry.update_host_metadata(req.context, host_id, host_template_used)
return {"host_template": host_template}
@utils.mutating
def delete_host_template(self, req, host_template):
"""
        Delete an existing host template from the registry.
        :param req: The WSGI/Webob Request object
        :param host_template: Mapping with 'cluster_name' and 'host_template_name'
        :retval Returns the updated template information as a mapping
"""
self._enforce(req, 'delete_host_template')
try:
if not host_template.get('cluster_name', None):
msg = "cluster name is null"
raise HTTPNotFound(explanation=msg)
params = {'filters':{'cluster_name':host_template['cluster_name']}}
host_templates = registry.host_template_lists_metadata(req.context, **params)
template_param = []
had_host_template = False
if host_templates and host_templates[0]:
template_param = json.loads(host_templates[0]['hosts'])
for host in template_param:
if host['name'] == host_template['host_template_name']:
template_param.remove(host)
had_host_template = True
break
if not had_host_template:
msg = "not host template name %s" %host_template['host_template_name']
raise HTTPNotFound(explanation=msg)
else:
host_templates[0]['hosts'] = json.dumps(template_param)
host_template = registry.update_host_template_metadata(req.context,
host_templates[0]['id'],
host_templates[0])
return {"host_template": host_template}
else:
msg = "host template cluster name %s is null" %host_template['cluster_name']
raise HTTPNotFound(explanation=msg)
except exception.NotFound as e:
msg = (_("Failed to find host template to delete: %s") %
utils.exception_to_str(e))
LOG.error(msg)
raise HTTPNotFound(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden to delete template: %s") %
utils.exception_to_str(e))
LOG.error(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except exception.InUseByStore as e:
msg = (_("template %(id)s could not be deleted because it is in use: "
"%(exc)s") % {"id": template_id, "exc": utils.exception_to_str(e)})
LOG.error(msg)
raise HTTPConflict(explanation=msg,
request=req,
content_type="text/plain")
class HostTemplateDeserializer(wsgi.JSONRequestDeserializer):
"""Handles deserialization of specific controller method requests."""
def _deserialize(self, request):
result = {}
result["host_template"] = utils.get_template_meta(request)
return result
def add_host_template(self, request):
return self._deserialize(request)
def update_host_template(self, request):
return self._deserialize(request)
def host_to_template(self, request):
return self._deserialize(request)
def template_to_host(self, request):
return self._deserialize(request)
def delete_host_template(self, request):
return self._deserialize(request)
class HostTemplateSerializer(wsgi.JSONResponseSerializer):
"""Handles serialization of specific controller method responses."""
def __init__(self):
self.notifier = notifier.Notifier()
def add_host_template(self, response, result):
host_template = result['host_template']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(host_template=host_template))
return response
def delete_host_template(self, response, result):
host_template = result['host_template']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(host_template=host_template))
return response
def get_host_template_detail(self, response, result):
host_template = result['host_template']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(host_template=host_template))
return response
def update_host_template(self, response, result):
host_template = result['host_template']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(host_template=host_template))
return response
def host_to_template(self, response, result):
host_template = result['host_template']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(host_template=host_template))
return response
def template_to_host(self, response, result):
host_template = result['host_template']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(host_template=host_template))
return response
def get_host_template_lists(self, response, result):
host_template = result['host_template']
response.status = 201
response.headers['Content-Type'] = 'application/json'
        response.body = self.to_json(dict(host_template=host_template))
        return response
def create_resource():
"""Templates resource factory method"""
deserializer = HostTemplateDeserializer()
serializer = HostTemplateSerializer()
return wsgi.Resource(Controller(), deserializer, serializer)

1728 code/daisy/daisy/api/v1/hosts.py Executable file
File diff suppressed because it is too large

1264 code/daisy/daisy/api/v1/images.py Executable file
File diff suppressed because it is too large


@ -0,0 +1,405 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/install endpoint for Daisy v1 API
"""
import time
import traceback
import webob.exc
from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPForbidden
from webob.exc import HTTPServerError
from threading import Thread
from daisy import i18n
from daisy import notifier
from daisy.api import policy
import daisy.api.v1
from daisy.common import exception
from daisy.common import property_utils
from daisy.common import utils
from daisy.common import wsgi
import daisy.registry.client.v1.api as registry
from daisy.api.v1 import controller
from daisy.api.v1 import filters
import daisy.api.backends.common as daisy_cmn
from daisy.api.backends import driver
from daisy.api.backends import os as os_handle
try:
import simplejson as json
except ImportError:
import json
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS
SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS
ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE
# If some backends have order constraints, add them here. A backend that is
# not in the next three order lists is assumed to have no order constraint.
BACKENDS_UPGRADE_ORDER = ['proton', 'zenic', 'tecs']
BACKENDS_UNINSTALL_ORDER = []
def get_deployment_backends(req, cluster_id, backends_order):
cluster_roles = daisy_cmn.get_cluster_roles_detail(req,cluster_id)
cluster_backends = set([role['deployment_backend'] for role in cluster_roles if daisy_cmn.get_hosts_of_role(req, role['id'])])
ordered_backends = [backend for backend in backends_order if backend in cluster_backends]
other_backends = [backend for backend in cluster_backends if backend not in backends_order]
deployment_backends =ordered_backends + other_backends
return deployment_backends
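# Illustrative example (hypothetical backend names): with cluster backends
# {'tecs', 'proton', 'foo'} and backends_order ['proton', 'zenic', 'tecs'],
# the result is ['proton', 'tecs', 'foo']: order-constrained backends first,
# in the configured order, then the unconstrained remainder.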
class InstallTask(object):
"""
Class for install OS and TECS.
"""
""" Definition for install states."""
def __init__(self, req, cluster_id):
self.req = req
self.cluster_id = cluster_id
def _backends_install(self):
backends = get_deployment_backends(self.req, self.cluster_id, BACKENDS_INSTALL_ORDER)
if not backends:
LOG.info(_("No backends need to install."))
return
for backend in backends:
backend_driver = driver.load_deployment_dirver(backend)
backend_driver.install(self.req, self.cluster_id)
    # run() logs any exception raised in the thread instead of losing it silently
def run(self):
try:
self._run()
except Exception as e:
LOG.exception(e.message)
def _run(self):
"""
        Execute OS installation in sync mode.
:return:
"""
# get hosts config which need to install OS
all_hosts_need_os = os_handle.get_cluster_hosts_config(self.req, self.cluster_id)
if all_hosts_need_os:
hosts_with_role_need_os = [host_detail for host_detail in all_hosts_need_os if host_detail['status'] == 'with-role']
hosts_without_role_need_os = [host_detail for host_detail in all_hosts_need_os if host_detail['status'] != 'with-role']
else:
LOG.info(_("No host need to install os, begin to install "
"backends for cluster %s." % self.cluster_id))
self._backends_install()
return
run_once_flag = True
# if no hosts with role need os, install backend applications immediately
if not hosts_with_role_need_os:
run_once_flag = False
role_hosts_need_os = []
LOG.info(_("All of hosts with role is 'active', begin to install "
"backend applications for cluster %s first." % self.cluster_id))
self._backends_install()
else:
role_hosts_need_os = [host_detail['id'] for host_detail in hosts_with_role_need_os]
        # hosts with a role are put at the head of the list
order_hosts_need_os = hosts_with_role_need_os + hosts_without_role_need_os
while order_hosts_need_os:
os_install = os_handle.OSInstall(self.req, self.cluster_id)
#all os will be installed batch by batch with max_parallel_os_number which was set in daisy-api.conf
(order_hosts_need_os,role_hosts_need_os) = os_install.install_os(order_hosts_need_os,role_hosts_need_os)
            # after a batch of OS installs finishes, check whether all role
            # hosts have completed OS installation; if role_hosts_need_os is
            # empty, install TECS immediately
if run_once_flag and not role_hosts_need_os:
run_once_flag = False
                # wait for the newly installed OS to reboot
time.sleep(10)
LOG.info(_("All hosts with role install successfully, "
"begin to install backend applications for cluster %s." % self.cluster_id))
self._backends_install()
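# In short: _run() installs the OS batch by batch via OSInstall.install_os();
# once every host that carries a role has its OS installed
# (role_hosts_need_os drained), run_once_flag ensures the backend
# applications are installed exactly once while any remaining role-less
# hosts continue OS installation.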
class Controller(controller.BaseController):
"""
WSGI controller for hosts resource in Daisy v1 API
The hosts resource API is a RESTful web service for host data. The API
is as follows::
GET /hosts -- Returns a set of brief metadata about hosts
GET /hosts/detail -- Returns a set of detailed metadata about
hosts
HEAD /hosts/<ID> -- Return metadata about an host with id <ID>
GET /hosts/<ID> -- Return host data for host with id <ID>
POST /hosts -- Store host data and return metadata about the
newly-stored host
PUT /hosts/<ID> -- Update host metadata and/or upload host
data for a previously-reserved host
DELETE /hosts/<ID> -- Delete the host with id <ID>
"""
def __init__(self):
self.notifier = notifier.Notifier()
registry.configure_registry_client()
self.policy = policy.Enforcer()
if property_utils.is_property_protection_enabled():
self.prop_enforcer = property_utils.PropertyRules(self.policy)
else:
self.prop_enforcer = None
def _enforce(self, req, action, target=None):
"""Authorize an action against our policies"""
if target is None:
target = {}
try:
self.policy.enforce(req.context, action, target)
except exception.Forbidden:
raise HTTPForbidden()
def _raise_404_if_cluster_deleted(self, req, cluster_id):
cluster = self.get_cluster_meta_or_404(req, cluster_id)
if cluster['deleted']:
msg = _("Cluster with identifier %s has been deleted.") % cluster_id
raise webob.exc.HTTPNotFound(msg)
def _get_filters(self, req):
"""
Return a dictionary of query param filters from the request
:param req: the Request object coming from the wsgi layer
:retval a dict of key/value filters
"""
query_filters = {}
for param in req.params:
if param in SUPPORTED_FILTERS:
query_filters[param] = req.params.get(param)
if not filters.validate(param, query_filters[param]):
raise HTTPBadRequest(_('Bad value passed to filter '
'%(filter)s got %(val)s')
% {'filter': param,
'val': query_filters[param]})
return query_filters
def _get_query_params(self, req):
"""
Extracts necessary query params from request.
:param req: the WSGI Request object
:retval dict of parameters that can be used by registry client
"""
params = {'filters': self._get_filters(req)}
for PARAM in SUPPORTED_PARAMS:
if PARAM in req.params:
params[PARAM] = req.params.get(PARAM)
return params
@utils.mutating
def install_cluster(self, req, install_meta):
"""
Install TECS to a cluster.
:param req: The WSGI/Webob Request object
:raises HTTPBadRequest if x-install-cluster is missing
"""
cluster_id = install_meta['cluster_id']
self._enforce(req, 'install_cluster')
self._raise_404_if_cluster_deleted(req, cluster_id)
if install_meta.get("deployment_interface", None):
os_handle.pxe_server_build(req, install_meta)
return {"status": "pxe is installed"}
        # if hosts need OS installation, TECS installation is executed in InstallTask
os_install_obj = InstallTask(req, cluster_id)
os_install_thread = Thread(target=os_install_obj.run)
os_install_thread.start()
return {"status":"begin install"}
@utils.mutating
def uninstall_cluster(self, req, cluster_id):
"""
        Uninstall TECS from a cluster.
:param req: The WSGI/Webob Request object
:raises HTTPBadRequest if x-install-cluster is missing
"""
self._enforce(req, 'uninstall_cluster')
self._raise_404_if_cluster_deleted(req, cluster_id)
backends = get_deployment_backends(req, cluster_id, BACKENDS_UNINSTALL_ORDER)
for backend in backends:
backend_driver = driver.load_deployment_dirver(backend)
uninstall_thread = Thread(target=backend_driver.uninstall, args=(req, cluster_id))
uninstall_thread.start()
return {"status":"begin uninstall"}
@utils.mutating
def uninstall_progress(self, req, cluster_id):
self._enforce(req, 'uninstall_progress')
self._raise_404_if_cluster_deleted(req, cluster_id)
all_nodes = {}
backends = get_deployment_backends(req, cluster_id, BACKENDS_UNINSTALL_ORDER)
if not backends:
LOG.info(_("No backends need to uninstall."))
return all_nodes
for backend in backends:
backend_driver = driver.load_deployment_dirver(backend)
nodes_process = backend_driver.uninstall_progress(req, cluster_id)
all_nodes.update(nodes_process)
return all_nodes
@utils.mutating
def update_cluster(self, req, cluster_id):
"""
        Update TECS for a cluster.
:param req: The WSGI/Webob Request object
:raises HTTPBadRequest if x-install-cluster is missing
"""
self._enforce(req, 'update_cluster')
self._raise_404_if_cluster_deleted(req, cluster_id)
backends = get_deployment_backends(req, cluster_id, BACKENDS_UPGRADE_ORDER)
if not backends:
LOG.info(_("No backends need to update."))
return {"status":""}
for backend in backends:
backend_driver = driver.load_deployment_dirver(backend)
update_thread = Thread(target=backend_driver.upgrade, args=(req, cluster_id))
update_thread.start()
return {"status":"begin update"}
@utils.mutating
def update_progress(self, req, cluster_id):
self._enforce(req, 'update_progress')
self._raise_404_if_cluster_deleted(req, cluster_id)
backends = get_deployment_backends(req, cluster_id, BACKENDS_UPGRADE_ORDER)
all_nodes = {}
for backend in backends:
backend_driver = driver.load_deployment_dirver(backend)
nodes_process = backend_driver.upgrade_progress(req, cluster_id)
all_nodes.update(nodes_process)
return all_nodes
@utils.mutating
def export_db(self, req, install_meta):
"""
Export daisy db data to tecs.conf and HA.conf.
:param req: The WSGI/Webob Request object
:raises HTTPBadRequest if x-install-cluster is missing
"""
self._enforce(req, 'export_db')
cluster_id = install_meta['cluster_id']
self._raise_404_if_cluster_deleted(req, cluster_id)
all_config_files = {}
backends = get_deployment_backends(req, cluster_id, BACKENDS_INSTALL_ORDER)
if not backends:
LOG.info(_("No backends need to export."))
return all_config_files
for backend in backends:
backend_driver = driver.load_deployment_dirver(backend)
backend_config_files = backend_driver.export_db(req, cluster_id)
all_config_files.update(backend_config_files)
return all_config_files
@utils.mutating
def update_disk_array(self, req, cluster_id):
"""
        Update TECS Disk Array config for a cluster.
:param req: The WSGI/Webob Request object
:raises HTTPBadRequest if x-cluster is missing
"""
self._enforce(req, 'update_disk_array')
self._raise_404_if_cluster_deleted(req, cluster_id)
tecs_backend_name = 'tecs'
backends = get_deployment_backends(req, cluster_id, BACKENDS_UNINSTALL_ORDER)
if tecs_backend_name not in backends:
message = "No tecs backend"
LOG.info(_(message))
else:
backend_driver = driver.load_deployment_dirver(tecs_backend_name)
message = backend_driver.update_disk_array(req, cluster_id)
return {'status':message}
class InstallDeserializer(wsgi.JSONRequestDeserializer):
"""Handles deserialization of specific controller method requests."""
def _deserialize(self, request):
result = {}
result["install_meta"] = utils.get_dict_meta(request)
return result
def install_cluster(self, request):
return self._deserialize(request)
def export_db(self, request):
return self._deserialize(request)
def update_disk_array(self, request):
return {}
class InstallSerializer(wsgi.JSONResponseSerializer):
"""Handles serialization of specific controller method responses."""
def __init__(self):
self.notifier = notifier.Notifier()
def install_cluster(self, response, result):
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(result)
return response
def export_db(self, response, result):
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(result)
return response
def update_disk_array(self, response, result):
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(result)
return response
def create_resource():
"""Image members resource factory method"""
deserializer = InstallDeserializer()
serializer = InstallSerializer()
return wsgi.Resource(Controller(), deserializer, serializer)


@ -0,0 +1,278 @@
# Copyright 2012 OpenStack Foundation.
# Copyright 2013 NTT corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
import webob.exc
from daisy.api import policy
from daisy.api.v1 import controller
from daisy.common import exception
from daisy.common import utils
from daisy.common import wsgi
from daisy import i18n
import daisy.registry.client.v1.api as registry
LOG = logging.getLogger(__name__)
_ = i18n._
CONF = cfg.CONF
CONF.import_opt('image_member_quota', 'daisy.common.config')
class Controller(controller.BaseController):
def __init__(self):
self.policy = policy.Enforcer()
def _enforce(self, req, action):
"""Authorize an action against our policies"""
try:
self.policy.enforce(req.context, action, {})
except exception.Forbidden:
raise webob.exc.HTTPForbidden()
def _raise_404_if_host_deleted(self, req, host_id):
host = self.get_host_meta_or_404(req, host_id)
if host['deleted']:
msg = _("Host with identifier %s has been deleted.") % host_id
raise webob.exc.HTTPNotFound(msg)
def _raise_404_if_project_deleted(self, req, cluster_id):
project = self.get_cluster_meta_or_404(req, cluster_id)
if project['deleted']:
msg = _("Cluster with identifier %s has been deleted.") % cluster_id
raise webob.exc.HTTPNotFound(msg)
# def get_cluster_hosts(self, req, cluster_id, host_id=None):
# """
# Return a list of dictionaries indicating the members of the
# image, i.e., those tenants the image is shared with.
#
# :param req: the Request object coming from the wsgi layer
# :param image_id: The opaque image identifier
# :retval The response body is a mapping of the following form::
# {'members': [
# {'host_id': <HOST>, ...}, ...
# ]}
# """
# self._enforce(req, 'get_cluster_hosts')
# self._raise_404_if_project_deleted(req, cluster_id)
#
# try:
# members = registry.get_cluster_hosts(req.context, cluster_id, host_id)
# except exception.NotFound:
# msg = _("Project with identifier %s not found") % cluster_id
# LOG.warn(msg)
# raise webob.exc.HTTPNotFound(msg)
# except exception.Forbidden:
# msg = _("Unauthorized project access")
# LOG.warn(msg)
# raise webob.exc.HTTPForbidden(msg)
# return dict(members=members)
@utils.mutating
def delete(self, req, image_id, id):
"""
Removes a membership from the image.
"""
self._check_can_access_image_members(req.context)
self._enforce(req, 'delete_member')
self._raise_404_if_image_deleted(req, image_id)
try:
registry.delete_member(req.context, image_id, id)
self._update_store_acls(req, image_id)
except exception.NotFound as e:
LOG.debug(utils.exception_to_str(e))
raise webob.exc.HTTPNotFound(explanation=e.msg)
except exception.Forbidden as e:
LOG.debug(utils.exception_to_str(e))
raise webob.exc.HTTPNotFound(explanation=e.msg)
return webob.exc.HTTPNoContent()
@utils.mutating
def add_cluster_host(self, req, cluster_id, host_id, body=None):
"""
Adds a host with host_id to project with cluster_id.
"""
self._enforce(req, 'add_cluster_host')
self._raise_404_if_project_deleted(req, cluster_id)
self._raise_404_if_host_deleted(req, host_id)
try:
registry.add_cluster_host(req.context, cluster_id, host_id)
except exception.Invalid as e:
LOG.debug(utils.exception_to_str(e))
raise webob.exc.HTTPBadRequest(explanation=e.msg)
except exception.NotFound as e:
LOG.debug(utils.exception_to_str(e))
raise webob.exc.HTTPNotFound(explanation=e.msg)
except exception.Forbidden as e:
LOG.debug(utils.exception_to_str(e))
raise webob.exc.HTTPNotFound(explanation=e.msg)
return webob.exc.HTTPNoContent()
@utils.mutating
def delete_cluster_host(self, req, cluster_id, host_id):
"""
Delete a host with host_id from project with cluster_id.
"""
self._enforce(req, 'delete_cluster_host')
self._raise_404_if_project_deleted(req, cluster_id)
self._raise_404_if_host_deleted(req, host_id)
try:
registry.delete_cluster_host(req.context, cluster_id, host_id)
except exception.NotFound as e:
LOG.debug(utils.exception_to_str(e))
raise webob.exc.HTTPNotFound(explanation=e.msg)
except exception.Forbidden as e:
LOG.debug(utils.exception_to_str(e))
raise webob.exc.HTTPNotFound(explanation=e.msg)
return webob.exc.HTTPNoContent()
def default(self, req, image_id, id, body=None):
"""This will cover the missing 'show' and 'create' actions"""
raise webob.exc.HTTPMethodNotAllowed()
def _enforce_image_member_quota(self, req, attempted):
if CONF.image_member_quota < 0:
# If value is negative, allow unlimited number of members
return
maximum = CONF.image_member_quota
if attempted > maximum:
msg = _("The limit has been exceeded on the number of allowed "
"image members for this image. Attempted: %(attempted)s, "
"Maximum: %(maximum)s") % {'attempted': attempted,
'maximum': maximum}
raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
request=req)
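    # Example (assumed config values): with image_member_quota = 10, an
    # attempted count of 11 raises 413 Request Entity Too Large, while a
    # negative quota such as -1 disables the limit entirely.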
@utils.mutating
def update(self, req, image_id, id, body=None):
"""
Adds a membership to the image, or updates an existing one.
If a body is present, it is a dict with the following format::
{"member": {
"can_share": [True|False]
}}
If "can_share" is provided, the member's ability to share is
set accordingly. If it is not provided, existing memberships
remain unchanged and new memberships default to False.
"""
self._check_can_access_image_members(req.context)
self._enforce(req, 'modify_member')
self._raise_404_if_image_deleted(req, image_id)
new_number_of_members = len(registry.get_image_members(req.context,
image_id)) + 1
self._enforce_image_member_quota(req, new_number_of_members)
# Figure out can_share
can_share = None
if body and 'member' in body and 'can_share' in body['member']:
can_share = bool(body['member']['can_share'])
try:
registry.add_member(req.context, image_id, id, can_share)
self._update_store_acls(req, image_id)
except exception.Invalid as e:
LOG.debug(utils.exception_to_str(e))
raise webob.exc.HTTPBadRequest(explanation=e.msg)
except exception.NotFound as e:
LOG.debug(utils.exception_to_str(e))
raise webob.exc.HTTPNotFound(explanation=e.msg)
except exception.Forbidden as e:
LOG.debug(utils.exception_to_str(e))
raise webob.exc.HTTPNotFound(explanation=e.msg)
return webob.exc.HTTPNoContent()
@utils.mutating
def update_all(self, req, image_id, body):
"""
Replaces the members of the image with those specified in the
body. The body is a dict with the following format::
{"memberships": [
{"member_id": <MEMBER_ID>,
["can_share": [True|False]]}, ...
]}
"""
self._check_can_access_image_members(req.context)
self._enforce(req, 'modify_member')
self._raise_404_if_image_deleted(req, image_id)
memberships = body.get('memberships')
if memberships:
new_number_of_members = len(body['memberships'])
self._enforce_image_member_quota(req, new_number_of_members)
try:
registry.replace_members(req.context, image_id, body)
self._update_store_acls(req, image_id)
except exception.Invalid as e:
LOG.debug(utils.exception_to_str(e))
raise webob.exc.HTTPBadRequest(explanation=e.msg)
except exception.NotFound as e:
LOG.debug(utils.exception_to_str(e))
raise webob.exc.HTTPNotFound(explanation=e.msg)
except exception.Forbidden as e:
LOG.debug(utils.exception_to_str(e))
raise webob.exc.HTTPNotFound(explanation=e.msg)
return webob.exc.HTTPNoContent()
def get_host_projects(self, req, host_id):
"""
        Retrieves the list of cluster memberships for the given host.
        :param req: the Request object coming from the wsgi layer
        :param host_id: the opaque host identifier
:retval The response body is a mapping of the following form::
{'multi_projects': [
{'cluster_id': <PROJECT>, ...}, ...
]}
"""
try:
members = registry.get_host_projects(req.context, host_id)
except exception.NotFound as e:
LOG.debug(utils.exception_to_str(e))
raise webob.exc.HTTPNotFound(explanation=e.msg)
except exception.Forbidden as e:
LOG.debug(utils.exception_to_str(e))
raise webob.exc.HTTPForbidden(explanation=e.msg)
return dict(multi_projects=members)
def _update_store_acls(self, req, image_id):
image_meta = self.get_image_meta_or_404(req, image_id)
location_uri = image_meta.get('location')
public = image_meta.get('is_public')
self.update_store_acls(req, image_id, location_uri, public)
def create_resource():
"""Image members resource factory method"""
deserializer = wsgi.JSONRequestDeserializer()
serializer = wsgi.JSONResponseSerializer()
return wsgi.Resource(Controller(), deserializer, serializer)


@ -0,0 +1,691 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/networks endpoint for Daisy v1 API
"""
import ast
import copy
from oslo_config import cfg
from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPConflict
from webob.exc import HTTPForbidden
from webob.exc import HTTPNotFound
from webob import Response
from daisy.api import policy
import daisy.api.v1
from daisy.api.v1 import controller
from daisy.api.v1 import filters
from daisy.common import exception
from daisy.common import property_utils
from daisy.common import utils
from daisy.common import wsgi
from daisy import i18n
from daisy import notifier
import daisy.registry.client.v1.api as registry
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS
SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS
ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE
CONF = cfg.CONF
CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format')
CONF.import_opt('container_formats', 'daisy.common.config',
group='image_format')
CONF.import_opt('image_property_quota', 'daisy.common.config')
SUPPORT_NETWORK_TYPE = ('PUBLIC', 'PRIVATE', 'STORAGE', 'MANAGEMENT', 'EXTERNAL', 'DEPLOYMENT', 'VXLAN')
SUPPORT_NETWORK_TEMPLATE_TYPE = ('custom', 'template', 'default')
SUPPORT_ML2_TYPE = ('ovs', 'sriov(direct)', 'sriov(macvtap)',
'ovs,sriov(direct)', 'ovs,sriov(macvtap)')
SUPPORT_NETWORK_CAPABILITY = ('high', 'low')
class Controller(controller.BaseController):
"""
WSGI controller for networks resource in Daisy v1 API
    The networks resource API is a RESTful web service for network data. The API
    is as follows::
        GET /networks -- Returns a set of brief metadata about networks
        GET /networks/detail -- Returns a set of detailed metadata about
                                networks
        HEAD /networks/<ID> -- Return metadata about a network with id <ID>
        GET /networks/<ID> -- Return network data for network with id <ID>
        POST /networks -- Store network data and return metadata about the
                          newly-stored network
        PUT /networks/<ID> -- Update network metadata and/or upload network
                              data for a previously-reserved network
        DELETE /networks/<ID> -- Delete the network with id <ID>
"""
def __init__(self):
self.notifier = notifier.Notifier()
registry.configure_registry_client()
self.policy = policy.Enforcer()
if property_utils.is_property_protection_enabled():
self.prop_enforcer = property_utils.PropertyRules(self.policy)
else:
self.prop_enforcer = None
def _enforce(self, req, action, target=None):
"""Authorize an action against our policies"""
if target is None:
target = {}
try:
self.policy.enforce(req.context, action, target)
except exception.Forbidden:
raise HTTPForbidden()
def _raise_404_if_network_deleted(self, req, network_id):
network = self.get_network_meta_or_404(req, network_id)
if network['deleted']:
msg = _("Network with identifier %s has been deleted.") % network_id
raise HTTPNotFound(msg)
    def _raise_404_if_cluster_delete(self, req, cluster_id):
        cluster = self.get_cluster_meta_or_404(req, cluster_id)
        if cluster['deleted']:
            msg = _("Cluster with identifier %s has been deleted.") % cluster_id
            raise HTTPNotFound(msg)
def _get_network_name_by_cluster_id(self, context, cluster_id):
networks = registry.get_networks_detail(context, cluster_id)
network_name_list = []
for network in networks:
network_name_list.append(network['name'])
return network_name_list
def _get_filters(self, req):
"""
Return a dictionary of query param filters from the request
:param req: the Request object coming from the wsgi layer
:retval a dict of key/value filters
"""
query_filters = {}
for param in req.params:
if param in SUPPORTED_FILTERS:
query_filters[param] = req.params.get(param)
if not filters.validate(param, query_filters[param]):
raise HTTPBadRequest(_('Bad value passed to filter '
'%(filter)s got %(val)s')
% {'filter': param,
'val': query_filters[param]})
return query_filters
def _get_query_params(self, req):
"""
Extracts necessary query params from request.
:param req: the WSGI Request object
:retval dict of parameters that can be used by registry client
"""
params = {'filters': self._get_filters(req)}
for PARAM in SUPPORTED_PARAMS:
if PARAM in req.params:
params[PARAM] = req.params.get(PARAM)
return params
    def validate_ip_format(self, ip_str):
        '''
        valid ip_str format: '10.43.178.9'
        invalid ip_str formats: '123. 233.42.12' (space inside a field),
        '3234.23.453.353' (field out of range), '-2.23.24.234' (negative
        field), '1.2.3.4d' (letter inside a field), '10.43.1789' (wrong
        field count)
        '''
        valid_format = False
        if ip_str.count('.') == 3 and \
                all(num.isdigit() and 0 <= int(num) < 256
                    for num in ip_str.rstrip().split('.')):
            valid_format = True
        if not valid_format:
            msg = (_("%s invalid ip format!") % ip_str)
            LOG.warn(msg)
            raise HTTPForbidden(msg)
def _ip_into_int(self, ip):
"""
        Convert an ip string to a decimal integer.
        :param ip: ip string
        :return: decimal integer
"""
return reduce(lambda x, y: (x<<8)+y, map(int, ip.split('.')))
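    # e.g. _ip_into_int('10.0.0.1') == 10 * 2 ** 24 + 1 == 167772161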
def _is_in_network_range(self, ip, network):
"""
Check ip is in range
        :param ip: IP to be checked, e.g. 192.168.1.2.
        :param network: IP range in CIDR form, e.g. 192.168.0.0/24.
        :return: True if ip is in the range, else False.
"""
network = network.split('/')
mask = ~(2**(32 - int(network[1])) - 1)
return (self._ip_into_int(ip) & mask) == (self._ip_into_int(network[0]) & mask)
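    # Worked example (assumed values): for network '192.168.0.0/23' the mask
    # is ~(2**9 - 1) = 0xFFFFFE00, so '192.168.1.2' masks to 192.168.0.0 and
    # is in range (True), while '192.168.2.2' masks to 192.168.2.0 (False).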
def _verify_uniqueness_of_network_name(self, req, network_list, network_meta, is_update = False):
"""
        Network name must be unique within a cluster (case-insensitive).
:param req:
:param network_list: network plane in cluster
:param network_meta: network plane need be verified
:return:
"""
if not network_list or not network_meta or not network_meta.get('name', None):
msg = _("Input params invalid for verifying uniqueness of network name.")
raise HTTPBadRequest(msg, request=req, content_type="text/plain")
network_name = network_meta['name']
for network in network_list['networks']:
if (is_update and
network_name == network['name'] and
network_meta['id'] == network['id']):
return
        # network name comparison is case-insensitive
        network_name_list = [network['name'].lower() for network in
                             network_list['networks'] if network.get('name', None)]
        if network_name.lower() in network_name_list:
            msg = _("Network name is case-insensitive and %s already exists in the cluster." % network_name)
            raise HTTPConflict(msg, request=req, content_type="text/plain")
if not is_update:
            # The type of a new network must not duplicate an existing
            # record, except for PRIVATE and STORAGE networks.
network_type_exist_list = \
[network['network_type'] for network in network_list['networks']
if network.get('network_type', None) and network['network_type'] != "PRIVATE"
and network['network_type'] != "STORAGE"]
if network_meta.get("network_type", None) in network_type_exist_list:
msg = _("The %s network plane %s must be only, except PRIVATE network." %
(network_meta['network_type'], network_name))
raise HTTPConflict(msg, request=req, content_type="text/plain")
    def _valid_vlan_range(self, req, network_meta):
        if (('vlan_start' in network_meta and 'vlan_end' not in network_meta) or
                ('vlan_start' not in network_meta and 'vlan_end' in network_meta)):
            raise HTTPBadRequest(explanation="vlan-start and vlan-end must be given at the same time", request=req)
        if 'vlan_start' in network_meta:
            if not (int(network_meta['vlan_start']) >= 1 and
                    int(network_meta['vlan_start']) <= 4094):
                raise HTTPBadRequest(explanation="vlan-start must be an integer in '1~4094'", request=req)
        if 'vlan_end' in network_meta:
            if not (int(network_meta['vlan_end']) >= 1 and
                    int(network_meta['vlan_end']) <= 4094):
                raise HTTPBadRequest(explanation="vlan-end must be an integer in '1~4094'", request=req)
            if int(network_meta['vlan_start']) > int(network_meta['vlan_end']):
                raise HTTPBadRequest(explanation="vlan-start must not be greater than vlan-end", request=req)
@utils.mutating
def add_network(self, req, network_meta):
"""
        Adds a new network to Daisy.
        :param req: The WSGI/Webob Request object
        :param network_meta: Mapping of metadata about network
        :raises HTTPBadRequest if required network fields are missing
"""
self._enforce(req, 'add_network')
cluster_id = network_meta.get('cluster_id',None)
if cluster_id:
self._raise_404_if_cluster_delete(req, cluster_id)
network_list = self.detail(req, cluster_id)
self._verify_uniqueness_of_network_name(req, network_list, network_meta)
# else:
# if network_meta.get('type',None) != "template":
# raise HTTPBadRequest(explanation="cluster id must be given", request=req)
network_name=network_meta.get('name',None)
network_name_split = network_name.split('_')
for network_name_info in network_name_split :
if not network_name_info.isalnum():
raise ValueError('network name must be numbers or letters or underscores !')
        if 'network_type' not in network_meta:
            raise HTTPBadRequest(explanation="network-type must be given", request=req)
        if network_meta['network_type'] not in SUPPORT_NETWORK_TYPE:
            raise HTTPBadRequest(explanation="unsupported network-type", request=req)
        if ('type' in network_meta and
                network_meta['type'] not in SUPPORT_NETWORK_TEMPLATE_TYPE):
            raise HTTPBadRequest(explanation="unsupported type", request=req)
        if ('capability' in network_meta and
                network_meta['capability'] not in SUPPORT_NETWORK_CAPABILITY):
            raise HTTPBadRequest(explanation="unsupported capability type", request=req)
self._valid_vlan_range(req, network_meta)
        if network_meta.get('ip_ranges', None):
            cidr = None
            if 'cidr' not in network_meta:
                msg = (_("When an ip range is specified, the CIDR parameter can not be empty."))
                LOG.warn(msg)
                raise HTTPForbidden(msg)
            else:
                cidr = network_meta['cidr']
                cidr_division = cidr.split('/')
                if len(cidr_division) != 2 or (cidr_division[1]
                        and int(cidr_division[1]) > 32 or int(cidr_division[1]) < 0):
                    msg = (_("Wrong CIDR format."))
                    LOG.warn(msg)
                    raise HTTPForbidden(msg)
                self.validate_ip_format(cidr_division[0])
            # parse the literal safely instead of calling eval() on request data
            ip_ranges = ast.literal_eval(network_meta['ip_ranges'])
last_ip_range_end = 0
int_ip_ranges_list = list()
sorted_int_ip_ranges_list = list()
for ip_pair in ip_ranges:
                if set(ip_pair.keys()) != set(['start', 'end']):
                    msg = (_("Each IP range must have exactly 'start' and 'end' keys."))
LOG.warn(msg)
raise HTTPForbidden(msg)
ip_start = ip_pair['start']
ip_end = ip_pair['end']
self.validate_ip_format(ip_start) #check ip format
self.validate_ip_format(ip_end)
if not self._is_in_network_range(ip_start, cidr):
msg = (_("IP address %s was not in the range of CIDR %s." % (ip_start, cidr)))
LOG.warn(msg)
raise HTTPForbidden(msg)
if not self._is_in_network_range(ip_end, cidr):
msg = (_("IP address %s was not in the range of CIDR %s." % (ip_end, cidr)))
LOG.warn(msg)
raise HTTPForbidden(msg)
#transform ip format to int when the string format is valid
int_ip_start = self._ip_into_int(ip_start)
int_ip_end = self._ip_into_int(ip_end)
if int_ip_start > int_ip_end:
msg = (_("Wrong ip range format."))
LOG.warn(msg)
raise HTTPForbidden(msg)
int_ip_ranges_list.append([int_ip_start, int_ip_end])
sorted_int_ip_ranges_list = sorted(int_ip_ranges_list, key=lambda x : x[0])
for int_ip_range in sorted_int_ip_ranges_list:
if last_ip_range_end and last_ip_range_end >= int_ip_range[0]:
msg = (_("Between ip ranges can not be overlap."))
LOG.warn(msg) # such as "[10, 15], [12, 16]", last_ip_range_end >= int_ip_range[0], this ip ranges were overlap
raise HTTPForbidden(msg)
else:
last_ip_range_end = int_ip_range[1]
if network_meta.get('cidr', None) \
and network_meta.get('vlan_id', None) \
and cluster_id:
networks = registry.get_networks_detail(req.context, cluster_id)
for network in networks:
if network['cidr'] and network['vlan_id']:
if network_meta['cidr'] == network['cidr'] \
and network_meta['vlan_id'] != network['vlan_id']:
msg = (_('Networks with the same cidr must '
'have the same vlan_id'))
raise HTTPBadRequest(explanation=msg)
if network_meta['vlan_id'] == network['vlan_id'] \
and network_meta['cidr'] != network['cidr']:
msg = (_('Networks with the same vlan_id must '
'have the same cidr'))
raise HTTPBadRequest(explanation=msg)
if network_meta.get('gateway', None) and network_meta.get('cidr', None):
gateway = network_meta['gateway']
cidr = network_meta['cidr']
self.validate_ip_format(gateway)
return_flag = self._is_in_network_range(gateway, cidr)
if not return_flag:
msg = (_('The gateway %s is not within the CIDR %s of the network.' % (gateway, cidr)))
raise HTTPBadRequest(explanation=msg)
network_meta = registry.add_network_metadata(req.context, network_meta)
return {'network_meta': network_meta}
@utils.mutating
def delete_network(self, req, network_id):
"""
Deletes a network from Daisy.
:param req: The WSGI/Webob Request object
:param network_id: The opaque network identifier
:raises HTTPNotFound if the network does not exist
"""
self._enforce(req, 'delete_network')
#self._raise_404_if_cluster_deleted(req, cluster_id)
#self._raise_404_if_network_deleted(req, network_id)
network = self.get_network_meta_or_404(req, network_id)
if network['deleted']:
msg = _("Network with identifier %s has been deleted.") % network_id
raise HTTPNotFound(msg)
if network['type'] != 'custom':
msg = _("Type of network was not custom, can not delete this network.")
raise HTTPForbidden(msg)
try:
registry.delete_network_metadata(req.context, network_id)
except exception.NotFound as e:
msg = (_("Failed to find network to delete: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPNotFound(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden to delete network: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except exception.InUseByStore as e:
msg = (_("Network %(id)s could not be deleted because it is in use: "
"%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)})
LOG.warn(msg)
raise HTTPConflict(explanation=msg,
request=req,
content_type="text/plain")
else:
#self.notifier.info('host.delete', host)
return Response(body='', status=200)
@utils.mutating
def get_network(self, req, id):
"""
Returns metadata about a network in the HTTP headers of the
response object
:param req: The WSGI/Webob Request object
:param id: The opaque network identifier
:raises HTTPNotFound if network metadata is not available to user
"""
self._enforce(req, 'get_network')
network_meta = self.get_network_meta_or_404(req, id)
return {'network_meta': network_meta}
def get_all_network(self, req):
"""
List all networks.
:param req:
:return:
"""
self._enforce(req, 'get_all_network')
params = self._get_query_params(req)
try:
networks = registry.get_all_networks(req.context,**params)
except Exception:
raise HTTPBadRequest(explanation="Get all networks failed.", request=req)
return dict(networks=networks)
def detail(self, req, id):
"""
Returns detailed information for all available networks
:param req: The WSGI/Webob Request object
:retval The response body is a mapping of the following form::
{'networks': [
{'id': <ID>,
'name': <NAME>,
'description': <DESCRIPTION>,
'created_at': <TIMESTAMP>,
'updated_at': <TIMESTAMP>,
'deleted_at': <TIMESTAMP>|<NONE>,}, ...
]}
"""
cluster_id = self._raise_404_if_cluster_delete(req, id)
self._enforce(req, 'get_networks')
params = self._get_query_params(req)
try:
networks = registry.get_networks_detail(req.context, id,**params)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return dict(networks=networks)
@utils.mutating
def update_network(self, req, network_id, network_meta):
"""
Updates an existing network with the registry.
:param request: The WSGI/Webob Request object
:param network_id: The opaque network identifier
:retval Returns the updated network information as a mapping
"""
if network_meta.has_key('name'):
network_name=network_meta.get('name',None)
network_name_split = network_name.split('_')
for network_name_info in network_name_split :
if not network_name_info.isalnum():
raise ValueError('network name must consist of numbers, letters and underscores!')
self._enforce(req, 'update_network')
#orig_cluster_meta = self.get_cluster_meta_or_404(req, cluster_id)
orig_network_meta = self.get_network_meta_or_404(req, network_id)
# Do not allow any updates on a deleted network.
if orig_network_meta['deleted']:
msg = _("Forbidden to update deleted host.")
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
if (network_meta.has_key('network_type') and
network_meta['network_type'] not in SUPPORT_NETWORK_TYPE):
raise HTTPBadRequest(explanation="unsupported network-type", request=req)
if (network_meta.has_key('type') and
network_meta['type'] not in SUPPORT_NETWORK_TEMPLATE_TYPE):
raise HTTPBadRequest(explanation="unsupported type", request=req)
if (network_meta.has_key('type') and
network_meta['type'] == 'template'):
raise HTTPBadRequest(explanation="network template type is not allowed to update", request=req)
if (network_meta.has_key('capability') and
network_meta['capability'] not in SUPPORT_NETWORK_CAPABILITY):
raise HTTPBadRequest(explanation="unsupported capability type", request=req)
self._valid_vlan_range(req, network_meta)
network_name = network_meta.get('name', None)
cluster_id = orig_network_meta['cluster_id']
if network_name and cluster_id:
network_updated = copy.deepcopy(network_meta)
network_updated['id'] = network_id
network_type = network_meta.get('network_type', None)
network_updated['network_type'] = \
orig_network_meta['network_type'] if not network_type else network_type
network_list = self.detail(req, cluster_id)
self._verify_uniqueness_of_network_name(req, network_list, network_updated, True)
cidr = network_meta.get('cidr', orig_network_meta['cidr'])
vlan_id = network_meta.get('vlan_id', orig_network_meta['vlan_id'])
if cidr:
cidr_division = cidr.split('/')
if (len(cidr_division) != 2 or not cidr_division[1] or
int(cidr_division[1]) > 32 or int(cidr_division[1]) < 0):
msg = (_("Wrong CIDR format."))
LOG.warn(msg)
raise HTTPForbidden(msg)
self.validate_ip_format(cidr_division[0])
if cidr and vlan_id and cluster_id:
networks = registry.get_networks_detail(req.context, cluster_id)
for network in networks:
if network['cidr'] and network['vlan_id']:
if cidr == network['cidr'] \
and vlan_id != network['vlan_id'] \
and network['id'] != network_id:
msg = (_('Networks with the same cidr must have '
'the same vlan_id'))
raise HTTPBadRequest(explanation=msg)
if vlan_id == network['vlan_id'] \
and cidr != network['cidr'] \
and network['id'] != network_id:
msg = (_('Networks with the same vlan_id must '
'have the same cidr'))
raise HTTPBadRequest(explanation=msg)
if network_meta.get('ip_ranges', None):
if not cidr:
msg = (_("When ip range was specified, the CIDR parameter can not be empty."))
LOG.warn(msg)
raise HTTPForbidden(msg)
ip_ranges = eval(network_meta['ip_ranges'])
last_ip_range_end = 0
int_ip_ranges_list = list()
sorted_int_ip_ranges_list = list()
for ip_pair in ip_ranges:
if set(ip_pair.keys()) != set(['start', 'end']):
msg = (_("Each IP range must contain exactly the 'start' and 'end' keys."))
LOG.warn(msg)
raise HTTPForbidden(msg)
ip_start = ip_pair['start']
ip_end = ip_pair['end']
self.validate_ip_format(ip_start) #check ip format
self.validate_ip_format(ip_end)
if not self._is_in_network_range(ip_start, cidr):
msg = (_("IP address %s was not in the range of CIDR %s." % (ip_start, cidr)))
LOG.warn(msg)
raise HTTPForbidden(msg)
if not self._is_in_network_range(ip_end, cidr):
msg = (_("IP address %s was not in the range of CIDR %s." % (ip_end, cidr)))
LOG.warn(msg)
raise HTTPForbidden(msg)
#transform ip format to int when the string format is valid
int_ip_start = self._ip_into_int(ip_start)
int_ip_end = self._ip_into_int(ip_end)
if int_ip_start > int_ip_end:
msg = (_("Wrong ip range format."))
LOG.warn(msg)
raise HTTPForbidden(msg)
int_ip_ranges_list.append([int_ip_start, int_ip_end])
sorted_int_ip_ranges_list = sorted(int_ip_ranges_list, key=lambda x : x[0])
LOG.warn("sorted_int_ip_ranges_list: "% sorted_int_ip_ranges_list)
#check ip ranges overlap
for int_ip_range in sorted_int_ip_ranges_list:
if last_ip_range_end and last_ip_range_end >= int_ip_range[0]:
msg = (_("Between ip ranges can not be overlap."))
LOG.warn(msg) # such as "[10, 15], [12, 16]", last_ip_range_end >= int_ip_range[0], this ip ranges were overlap
raise HTTPForbidden(msg)
else:
last_ip_range_end = int_ip_range[1]
if network_meta.get('gateway', orig_network_meta['gateway']) and network_meta.get('cidr', orig_network_meta['cidr']):
gateway = network_meta.get('gateway', orig_network_meta['gateway'])
cidr = network_meta.get('cidr', orig_network_meta['cidr'])
self.validate_ip_format(gateway)
return_flag = self._is_in_network_range(gateway, cidr)
if not return_flag:
msg = (_('The gateway %s is not within the CIDR %s of the network.' % (gateway, cidr)))
raise HTTPBadRequest(explanation=msg)
try:
network_meta = registry.update_network_metadata(req.context,
network_id,
network_meta)
except exception.Invalid as e:
msg = (_("Failed to update network metadata. Got error: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
except exception.NotFound as e:
msg = (_("Failed to find network to update: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPNotFound(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden to update network: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except (exception.Conflict, exception.Duplicate) as e:
LOG.warn(utils.exception_to_str(e))
raise HTTPConflict(body=_('Network operation conflicts'),
request=req,
content_type='text/plain')
else:
self.notifier.info('network.update', network_meta)
return {'network_meta': network_meta}
class NetworkDeserializer(wsgi.JSONRequestDeserializer):
"""Handles deserialization of specific controller method requests."""
def _deserialize(self, request):
result = {}
result["network_meta"] = utils.get_network_meta(request)
return result
def add_network(self, request):
return self._deserialize(request)
def update_network(self, request):
return self._deserialize(request)
class NetworkSerializer(wsgi.JSONResponseSerializer):
"""Handles serialization of specific controller method responses."""
def __init__(self):
self.notifier = notifier.Notifier()
def add_network(self, response, result):
network_meta = result['network_meta']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(network=network_meta))
return response
def delete_network(self, response, result):
network_meta = result['network_meta']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(network=network_meta))
return response
def get_network(self, response, result):
network_meta = result['network_meta']
response.status = 200
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(network=network_meta))
return response
def create_resource():
"""Hosts resource factory method"""
deserializer = HostDeserializer()
serializer = HostSerializer()
return wsgi.Resource(Controller(), deserializer, serializer)
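
A minimal standalone sketch (not part of networks.py above) of the ip_ranges
validation that add_network and update_network perform: dotted-quad strings
are converted to integers, the ranges are sorted by start address, and any
pair of touching or overlapping ranges is rejected. The helper names below
are illustrative, not the controller's actual methods.

import socket
import struct

def ip_into_int(ip):
    # '192.168.1.1' -> 3232235777
    return struct.unpack('!I', socket.inet_aton(ip))[0]

def ranges_overlap(ip_ranges):
    # sort [start, end] pairs by start address, then compare neighbours
    int_ranges = sorted([(ip_into_int(r['start']), ip_into_int(r['end']))
                         for r in ip_ranges], key=lambda pair: pair[0])
    last_end = 0
    for start, end in int_ranges:
        if last_end and last_end >= start:
            return True   # e.g. [10, 15] and [12, 16] overlap
        last_end = end
    return False

# Two ranges inside 10.0.0.0/24 that share addresses are detected:
assert ranges_overlap([{'start': '10.0.0.10', 'end': '10.0.0.15'},
                       {'start': '10.0.0.12', 'end': '10.0.0.16'}])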

code/daisy/daisy/api/v1/roles.py Executable file

@ -0,0 +1,782 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/roles endpoint for Daisy v1 API
"""
from oslo_config import cfg
from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPConflict
from webob.exc import HTTPForbidden
from webob.exc import HTTPNotFound
from webob import Response
from daisy.api import policy
import daisy.api.v1
from daisy.api.v1 import controller
from daisy.api.v1 import filters
from daisy.common import exception
from daisy.common import property_utils
from daisy.common import utils
from daisy.common import wsgi
from daisy import i18n
from daisy import notifier
import daisy.registry.client.v1.api as registry
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS
SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS
ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE
SUPPORTED_DEPLOYMENT_BACKENDS = ('tecs', 'zenic', 'proton')
SUPPORTED_ROLE = ('CONTROLLER_LB', 'CONTROLLER_HA', 'COMPUTER', 'ZENIC_CTL', 'ZENIC_NFM',
'ZENIC_MDB', 'PROTON', 'CHILD_CELL_1_COMPUTER', 'CONTROLLER_CHILD_CELL_1')
SUPPORT_DISK_LOCATION = ('local', 'share')
CONF = cfg.CONF
CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format')
CONF.import_opt('container_formats', 'daisy.common.config',
group='image_format')
CONF.import_opt('image_property_quota', 'daisy.common.config')
class Controller(controller.BaseController):
"""
WSGI controller for roles resource in Daisy v1 API
The roles resource API is a RESTful web service for role data. The API
is as follows::
GET /roles -- Returns a set of brief metadata about roles
GET /roles/detail -- Returns a set of detailed metadata about
roles
HEAD /roles/<ID> -- Return metadata about a role with id <ID>
GET /roles/<ID> -- Return role data for role with id <ID>
POST /roles -- Store role data and return metadata about the
newly-stored role
PUT /roles/<ID> -- Update role metadata and/or upload role
data for a previously-reserved role
DELETE /roles/<ID> -- Delete the role with id <ID>
"""
def __init__(self):
self.notifier = notifier.Notifier()
registry.configure_registry_client()
self.policy = policy.Enforcer()
if property_utils.is_property_protection_enabled():
self.prop_enforcer = property_utils.PropertyRules(self.policy)
else:
self.prop_enforcer = None
def _enforce(self, req, action, target=None):
"""Authorize an action against our policies"""
if target is None:
target = {}
try:
self.policy.enforce(req.context, action, target)
except exception.Forbidden:
raise HTTPForbidden()
def _get_filters(self, req):
"""
Return a dictionary of query param filters from the request
:param req: the Request object coming from the wsgi layer
:retval a dict of key/value filters
"""
query_filters = {}
for param in req.params:
if param in SUPPORTED_FILTERS:
query_filters[param] = req.params.get(param)
if not filters.validate(param, query_filters[param]):
raise HTTPBadRequest(_('Bad value passed to filter '
'%(filter)s got %(val)s')
% {'filter': param,
'val': query_filters[param]})
return query_filters
def _get_query_params(self, req):
"""
Extracts necessary query params from request.
:param req: the WSGI Request object
:retval dict of parameters that can be used by registry client
"""
params = {'filters': self._get_filters(req)}
for PARAM in SUPPORTED_PARAMS:
if PARAM in req.params:
params[PARAM] = req.params.get(PARAM)
return params
def _raise_404_if_host_deleted(self, req, host_id):
host = self.get_host_meta_or_404(req, host_id)
if host['deleted']:
msg = _("Node with identifier %s has been deleted.") % host_id
raise HTTPNotFound(msg)
def _raise_404_if_service_deleted(self, req, service_id):
service = self.get_service_meta_or_404(req, service_id)
if service['deleted']:
msg = _("Service with identifier %s has been deleted.") % service_id
raise HTTPNotFound(msg)
def _raise_404_if_config_set_deleted(self, req, config_set_id):
config_set = self.get_config_set_meta_or_404(req, config_set_id)
if config_set['deleted']:
msg = _("Config_Set with identifier %s has been deleted.") % config_set_id
raise HTTPNotFound(msg)
def _raise_404_if_cluster_deleted(self, req, cluster_id):
cluster = self.get_cluster_meta_or_404(req, cluster_id)
if cluster['deleted']:
msg = _("cluster with identifier %s has been deleted.") % cluster_id
raise HTTPNotFound(msg)
def _get_service_name_list(self, req, role_service_id_list):
service_name_list = []
for service_id in role_service_id_list:
service_meta = registry.get_service_metadata(req.context, service_id)
service_name_list.append(service_meta['name'])
return service_name_list
def _get_host_disk_except_os_disk_by_info(self, host_info):
'''
:param host_info: dict of host metadata
:returns: total disk size (MB, int) left for data LVs after the OS,
swap, boot and redundant partitions are subtracted
'''
host_disk_except_os_disk_lists = 0
os_disk_m = host_info.get('root_lv_size', 51200)
swap_size_m = host_info.get('swap_lv_size', None)
if swap_size_m:
swap_size_m = (swap_size_m / 4)*4
else:
swap_size_m = 0
boot_partition_m = 400
redundant_partiton_m = 600
if not os_disk_m:
os_disk_m = 51200
#host_disk = 1024
host_disks = host_info.get('disks', None)
host_disk_size_m = 0
if host_disks:
for key, value in host_disks.items():
disk_size_b = str(value.get('size', None))
disk_size_b_str = disk_size_b.strip().split()[0]
if disk_size_b_str:
disk_size_b_int = int(disk_size_b_str)
disk_size_m = disk_size_b_int//(1024*1024)
host_disk_size_m = host_disk_size_m + disk_size_m
host_disk_except_os_disk_lists = host_disk_size_m - os_disk_m - swap_size_m - boot_partition_m - redundant_partiton_m
LOG.warn('----start----host_disk_except_os_disk_lists: %s -----end--' % host_disk_except_os_disk_lists)
return host_disk_except_os_disk_lists
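# Illustrative worked example (hypothetical numbers): a host reporting two
# disks of 500107862016 bytes each contributes 2 * 476940 MB; with the
# default root_lv_size of 51200 MB, no swap, the 400 MB boot partition and
# 600 MB of redundancy, about 901680 MB remain for the data LVs.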
def _check_host_validity(self, **paras):
'''
Check that paras['db_lv_size'], paras['glance_lv_size'] and
paras['nova_lv_size'] fit within paras['disk_size'].
'''
disk_size = paras.get('disk_size', None)
LOG.warn('--------disk_size:----- %s'% disk_size)
if disk_size:
disk_size_m = int(disk_size)
else:
disk_size_m = 0
if disk_size_m == 0: # a disk size of 0 means the host does not need an OS installed
return # so there is no disk size validity to check
db_lv_size_m = paras.get('db_lv_size', 300)
if db_lv_size_m:
db_lv_size_m = int(db_lv_size_m)
else:
db_lv_size_m = 0
glance_lv_size_m = paras.get('glance_lv_size', 17100)
if glance_lv_size_m:
glance_lv_size_m = int(glance_lv_size_m)
else:
glance_lv_size_m = 0
nova_lv_size_m = paras.get('nova_lv_size', 0)
if nova_lv_size_m:
nova_lv_size_m = int(nova_lv_size_m)
else:
nova_lv_size_m = 0
if nova_lv_size_m == -1:
nova_lv_size_m = 0
glance_lv_size_m = (glance_lv_size_m/4)*4
db_lv_size_m = (db_lv_size_m/4)*4
nova_lv_size_m = (nova_lv_size_m/4)*4
if glance_lv_size_m + db_lv_size_m + nova_lv_size_m > disk_size_m:
msg = _("There isn't enough disk space to specify database or glance or nova disk, please specify database or glance or nova disk size again")
LOG.debug(msg)
raise HTTPForbidden(msg)
def _check_nodes_exist(self, req, nodes):
for role_host_id in nodes:
self._raise_404_if_host_deleted(req, role_host_id)
def _check_services_exist(self, req, services):
for role_service_id in services:
self._raise_404_if_service_deleted(req, role_service_id)
def _check_config_set_id_exist(self, req, config_set_id):
self._raise_404_if_config_set_deleted(req, config_set_id)
def _check_glance_lv_value(self, req, glance_lv_value, role_name, service_name_list):
if int(glance_lv_value) < 0 and int(glance_lv_value) != -1:
msg = _("glance_lv_size can't be negative except -1.")
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
if not service_name_list or 'glance' not in service_name_list:
msg = _("service 'glance' is not in role %s, so can't "
"set the size of glance lv.") % role_name
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
def _check_db_lv_size(self, req, db_lv_size, service_name_list):
if int(db_lv_size) < 0 and int(db_lv_size) != -1 :
msg = _("The size of database disk can't be negative except -1.")
LOG.debug(msg)
raise HTTPForbidden(msg)
# Only a role with a database service may specify the size of a database.
if 'mariadb' not in service_name_list and 'mongodb' not in service_name_list:
msg = _('The role without database service is unable '
'to specify the size of the database!')
LOG.debug(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
def _check_nova_lv_size(self, req, nova_lv_size, role_name):
if role_name != "COMPUTER":
msg = _("The role is not COMPUTER, it can't set logic "
"volume disk for nova.")
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
try:
if int(nova_lv_size) < 0 and int(nova_lv_size) != -1:
msg = _("The nova_lv_size must be -1 or [0, N).")
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except (TypeError, ValueError):
msg = _("The nova_lv_size must be -1 or [0, N).")
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
def _check_all_lv_size(self, req, db_lv_size, glance_lv_size, nova_lv_size,
host_id_list, cluster_id, argws):
if db_lv_size or glance_lv_size or nova_lv_size:
for host_id in host_id_list:
host_disk_db_glance_nova_size = self.get_host_disk_db_glance_nova_size(req, host_id, cluster_id)
if host_disk_db_glance_nova_size['db_lv_size'] and db_lv_size and \
int(db_lv_size) < int(host_disk_db_glance_nova_size['db_lv_size']):
argws['db_lv_size'] = host_disk_db_glance_nova_size['db_lv_size']
else:
argws['db_lv_size'] = db_lv_size
if host_disk_db_glance_nova_size['glance_lv_size'] and glance_lv_size and \
int(glance_lv_size) < int(host_disk_db_glance_nova_size['glance_lv_size']):
argws['glance_lv_size'] = host_disk_db_glance_nova_size['glance_lv_size']
else:
argws['glance_lv_size'] = glance_lv_size
if host_disk_db_glance_nova_size['nova_lv_size'] and nova_lv_size and \
int(nova_lv_size) < int(host_disk_db_glance_nova_size['nova_lv_size']):
argws['nova_lv_size'] = host_disk_db_glance_nova_size['nova_lv_size']
else:
argws['nova_lv_size'] = nova_lv_size
argws['disk_size'] = host_disk_db_glance_nova_size['disk_size']
LOG.warn('--------host(%s) check_host_validity argws:----- %s'% (host_id, argws))
self._check_host_validity(**argws)
def _check_deployment_backend(self, req, deployment_backend):
if deployment_backend not in SUPPORTED_DEPLOYMENT_BACKENDS:
msg = "deployment backend '%s' is not supported." % deployment_backend
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
def _check_role_type_in_update_role(self, req, role_type, orig_role_meta):
if orig_role_meta['type'].lower() != role_type.lower():
msg = _("Role type can not be updated to other type.")
LOG.debug(msg)
raise HTTPForbidden(msg)
def _check_cluster_id_in_role_update(self, req, role_cluster, orig_role_meta):
if orig_role_meta['type'].lower() == 'template':
msg = _("The template role does not belong to any cluster.")
LOG.debug(msg)
raise HTTPForbidden(msg)
orig_role_cluster = orig_role_meta['cluster_id']
if orig_role_cluster != role_cluster: #Can not change the cluster which the role belongs to
msg = _("Can't update the cluster of the role.")
LOG.debug(msg)
raise HTTPForbidden(msg)
else:
self._raise_404_if_cluster_deleted(req, role_cluster)
def _check_role_name_in_role_update(self, req, role_meta, orig_role_meta):
role_name = role_meta['name']
cluster_id = role_meta.get('cluster_id', orig_role_meta['cluster_id'])
if cluster_id:
self.check_cluster_role_name_repetition(req, role_name, cluster_id)
else: #role type was template, cluster id was None
self.check_template_role_name_repetition(req, role_name)
def _check_all_lv_size_of_nodes_with_role_in_role_update(self, req, role_meta, orig_role_meta,
role_host_id_list):
#check host with this role at the same time
cluster_id = role_meta.get('cluster_id', None)
if not cluster_id: # not given in role_meta, fall back to the original role's cluster
cluster_id = orig_role_meta['cluster_id']
if not cluster_id: #without cluster id, raise Error
msg = _("The cluster_id parameter can not be None!")
LOG.debug(msg)
raise HTTPForbidden(msg)
argws = dict()
if role_meta.has_key('db_lv_size'):
db_lv_size = role_meta['db_lv_size']
else: #The db_lv_size has been specified before.
db_lv_size = orig_role_meta.get('db_lv_size')
if role_meta.has_key('glance_lv_size'):
glance_lv_size = role_meta['glance_lv_size']
else:
glance_lv_size = orig_role_meta.get('glance_lv_size')
if role_meta.has_key('nova_lv_size'):
nova_lv_size = role_meta['nova_lv_size']
else:
nova_lv_size = orig_role_meta.get('nova_lv_size')
if role_meta.has_key('nodes'):
host_id_list = list(eval(role_meta['nodes'])) + role_host_id_list
else:
host_id_list = role_host_id_list
self._check_all_lv_size(req, db_lv_size, glance_lv_size,
nova_lv_size, host_id_list, cluster_id, argws)
def _check_ntp_server(self, req, role_name):
if role_name != 'CONTROLLER_HA':
msg = 'The role %s needs no ntp_server' % role_name
raise HTTPForbidden(explanation=msg)
def _check_role_type_in_role_add(self, req, role_meta):
#role_type == None or not template, cluster id must not be None
role_type = role_meta['type']
if role_type.lower() != 'template':
role_cluster_id = role_meta.get('cluster_id', None)
if not role_cluster_id: #add role without cluster id parameter, raise error
msg = _("The cluster_id parameter can not be None if role was not a template type.")
LOG.debug(msg)
raise HTTPForbidden(msg)
else: #role_type == template, cluster id is not necessary
if role_meta.has_key('cluster_id'):
msg = _("Tht template role cannot be added to any cluster.")
LOG.debug(msg)
raise HTTPForbidden(msg)
def _check_all_lv_size_with_role_in_role_add(self, req, role_meta):
cluster_id = role_meta.get('cluster_id', None)
if not cluster_id: #without cluster id, raise Error
msg = _("The cluster_id parameter can not be None!")
LOG.debug(msg)
raise HTTPForbidden(msg)
argws = dict()
db_lv_size = role_meta.get('db_lv_size', 0)
glance_lv_size = role_meta.get('glance_lv_size', 0)
nova_lv_size = role_meta.get('nova_lv_size', 0)
host_id_list = list(eval(role_meta['nodes']))
self._check_all_lv_size(req, db_lv_size, glance_lv_size,
nova_lv_size, host_id_list, cluster_id, argws)
def get_host_disk_db_glance_nova_size(self, req, host_id, cluster_id):
'''
return :
host_disk_db_glance_nova_size['disk_size'] = 1024000
host_disk_db_glance_nova_size['db_lv_size'] = 1011
host_disk_db_glance_nova_size['glance_lv_size'] = 1011
host_disk_db_glance_nova_size['nova_lv_size'] = 1011
'''
host_disk_db_glance_nova_size = dict()
db_lv_size = list()
glance_lv_size = list()
nova_lv_size= list()
disk_size = list()
host_info = self.get_host_meta_or_404(req, host_id)
if host_info:
if host_info.has_key('deleted') and host_info['deleted']:
msg = _("Node with identifier %s has been deleted.") % host_info['id']
LOG.debug(msg)
raise HTTPNotFound(msg)
# get host disk information
host_disk = self._get_host_disk_except_os_disk_by_info(host_info)
host_disk_db_glance_nova_size['disk_size'] = host_disk
# get role_host db/glance/nova information
cluster_info = self.get_cluster_meta_or_404(req, cluster_id)
if host_info.has_key('cluster'): #host with cluster
if host_info['cluster'] != cluster_info['name']:
#type(host_info['cluster']) = list, type(cluster_info['name']) = str
msg = _("Role and hosts belong to different cluster.")
LOG.debug(msg)
raise HTTPNotFound(msg)
else:
all_roles = registry.get_roles_detail(req.context)
cluster_roles = [role for role in all_roles if role['cluster_id'] == cluster_id]
# role information is saved in cluster_roles
if host_info.has_key('role') and host_info['role']: #host with role
for role in cluster_roles:
if role['name'] in host_info['role'] and cluster_roles:
db_lv_size.append(role.get('db_lv_size', None))
glance_lv_size.append(role.get('glance_lv_size', None))
nova_lv_size.append(role.get('nova_lv_size', None))
if db_lv_size:
host_disk_db_glance_nova_size['db_lv_size'] = max(db_lv_size)
else: # no role on this host specified a db LV size
host_disk_db_glance_nova_size['db_lv_size'] = 0
if glance_lv_size:
host_disk_db_glance_nova_size['glance_lv_size'] = max(glance_lv_size)
else:
host_disk_db_glance_nova_size['glance_lv_size'] = 0
if nova_lv_size:
host_disk_db_glance_nova_size['nova_lv_size'] = max(nova_lv_size)
else:
host_disk_db_glance_nova_size['nova_lv_size'] = 0
LOG.warn('--------host(%s)disk_db_glance_nova_size:----- %s'% (host_id, host_disk_db_glance_nova_size))
return host_disk_db_glance_nova_size
def check_cluster_role_name_repetition(self, req, role_name, cluster_id):
all_roles = registry.get_roles_detail(req.context)
cluster_roles = [role for role in all_roles if role['cluster_id'] == cluster_id]
cluster_roles_name = [role['name'].lower() for role in cluster_roles]
if role_name.lower() in cluster_roles_name:
msg = _("The role %s has already been in the cluster %s!" % (role_name, cluster_id))
LOG.debug(msg)
raise HTTPForbidden(msg)
def check_template_role_name_repetition(self, req, role_name):
all_roles = registry.get_roles_detail(req.context)
template_roles = [role for role in all_roles if role['cluster_id'] == None]
template_roles_name = [role['name'].lower() for role in template_roles]
if role_name.lower() in template_roles_name:
msg = _("The role %s has already been in the the template role." % role_name)
LOG.debug(msg)
raise HTTPForbidden(msg)
def _check_disk_parameters(self, req, role_meta):
if (role_meta.has_key('disk_location') and
role_meta['disk_location'] not in SUPPORT_DISK_LOCATION):
msg = _("value of disk_location is not supported.")
raise HTTPForbidden(msg)
def _check_type_role_reasonable(self, req, role_meta):
if role_meta['role_type'] not in SUPPORTED_ROLE:
msg = 'The role type %s is illegal' % role_meta['role_type']
raise HTTPForbidden(explanation=msg)
def _check_role_update_parameters(self, req, role_meta, orig_role_meta,
role_service_id_list, role_host_id_list):
role_name = orig_role_meta['name']
if role_meta.get('type', None):
self._check_role_type_in_update_role(req, role_meta['type'], orig_role_meta)
if role_meta.has_key('ntp_server'):
self._check_ntp_server(req, role_name)
if role_meta.has_key('nodes'):
self._check_nodes_exist(req, list(eval(role_meta['nodes'])))
if role_meta.has_key('services'):
self._check_services_exist(req, list(eval(role_meta['services'])))
role_service_id_list.extend(list(eval(role_meta['services'])))
if role_meta.has_key('config_set_id'):
self._check_config_set_id_exist(req, str(role_meta['config_set_id']))
if role_meta.has_key('cluster_id'):
self._check_cluster_id_in_role_update(req, str(role_meta['cluster_id']), orig_role_meta)
if role_meta.has_key('name'):
self._check_role_name_in_role_update(req, role_meta, orig_role_meta)
service_name_list = self._get_service_name_list(req, role_service_id_list)
glance_lv_value = role_meta.get('glance_lv_size', orig_role_meta['glance_lv_size'])
if glance_lv_value:
self._check_glance_lv_value(req, glance_lv_value, role_name, service_name_list)
if role_meta.get('db_lv_size', None) and role_meta['db_lv_size']:
self._check_db_lv_size(req, role_meta['db_lv_size'], service_name_list)
if role_meta.get('nova_lv_size', None):
self._check_nova_lv_size(req, role_meta['nova_lv_size'], role_name)
if role_meta.has_key('nodes') or role_host_id_list:
self._check_all_lv_size_of_nodes_with_role_in_role_update(req, role_meta, orig_role_meta,
role_host_id_list)
self._check_disk_parameters(req, role_meta)
if role_meta.has_key('deployment_backend'):
self._check_deployment_backend(req, role_meta['deployment_backend'])
if role_meta.get('role_type', None):
self._check_type_role_reasonable(req, role_meta)
def _check_role_add_parameters(self, req, role_meta, role_service_id_list):
role_type = role_meta.get('type', None)
role_name = role_meta.get('name', None)
if role_meta.get('type', None):
self._check_role_type_in_role_add(req, role_meta)
if role_meta.has_key('nodes'):
self._check_nodes_exist(req, list(eval(role_meta['nodes'])))
if role_meta.has_key('services'):
self._check_services_exist(req, list(eval(role_meta['services'])))
role_service_id_list.extend(list(eval(role_meta['services'])))
if role_meta.has_key('config_set_id'):
self._check_config_set_id_exist(req, str(role_meta['config_set_id']))
if role_meta.has_key('cluster_id'):
orig_cluster = str(role_meta['cluster_id'])
self._raise_404_if_cluster_deleted(req, orig_cluster)
self.check_cluster_role_name_repetition(req, role_name, orig_cluster)
else:
self.check_template_role_name_repetition(req, role_name)
service_name_list = self._get_service_name_list(req, role_service_id_list)
glance_lv_value = role_meta.get('glance_lv_size', None)
if glance_lv_value:
self._check_glance_lv_value(req, glance_lv_value, role_name, service_name_list)
if role_meta.get('db_lv_size', None) and role_meta['db_lv_size']:
self._check_db_lv_size(req, role_meta['db_lv_size'], service_name_list)
if role_meta.get('nova_lv_size', None):
self._check_nova_lv_size(req, role_meta['nova_lv_size'], role_name)
if role_meta.has_key('nodes'):
self._check_all_lv_size_with_role_in_role_add(req, role_meta)
self._check_disk_parameters(req, role_meta)
if role_meta.has_key('deployment_backend'):
self._check_deployment_backend(req, role_meta['deployment_backend'])
else:
role_meta['deployment_backend'] = 'tecs'
if role_meta.get('role_type', None):
self._check_type_role_reasonable(req, role_meta)
@utils.mutating
def add_role(self, req, role_meta):
"""
Adds a new role to Daisy.
:param req: The WSGI/Webob Request object
:param role_meta: Mapping of metadata about the role
:raises HTTPBadRequest if x-role-name is missing
"""
self._enforce(req, 'add_role')
role_service_id_list = []
self._check_role_add_parameters(req, role_meta, role_service_id_list)
role_name = role_meta["name"]
role_description = role_meta["description"]
LOG.debug("add_role: %s (%s)", role_name, role_description)
role_meta = registry.add_role_metadata(req.context, role_meta)
return {'role_meta': role_meta}
@utils.mutating
def delete_role(self, req, id):
"""
Deletes a role from Daisy.
:param req: The WSGI/Webob Request object
:param id: The opaque role identifier
:raises HTTPNotFound if the role does not exist
"""
self._enforce(req, 'delete_role')
#role = self.get_role_meta_or_404(req, id)
print "delete_role:%s" % id
try:
registry.delete_role_metadata(req.context, id)
except exception.NotFound as e:
msg = (_("Failed to find role to delete: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPNotFound(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden to delete role: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except exception.InUseByStore as e:
msg = (_("role %(id)s could not be deleted because it is in use: "
"%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)})
LOG.warn(msg)
raise HTTPConflict(explanation=msg,
request=req,
content_type="text/plain")
else:
#self.notifier.info('role.delete', role)
return Response(body='', status=200)
@utils.mutating
def get_role(self, req, id):
"""
Returns metadata about a role in the HTTP headers of the
response object
:param req: The WSGI/Webob Request object
:param id: The opaque role identifier
:raises HTTPNotFound if role metadata is not available to user
"""
self._enforce(req, 'get_role')
role_meta = self.get_role_meta_or_404(req, id)
return {'role_meta': role_meta}
def detail(self, req):
"""
Returns detailed information for all available roles
:param req: The WSGI/Webob Request object
:retval The response body is a mapping of the following form::
{'roles': [
{'id': <ID>,
'name': <NAME>,
'description': <DESCRIPTION>,
'created_at': <TIMESTAMP>,
'updated_at': <TIMESTAMP>,
'deleted_at': <TIMESTAMP>|<NONE>,}, ...
]}
"""
self._enforce(req, 'get_roles')
params = self._get_query_params(req)
filters=params.get('filters',None)
if 'cluster_id' in filters:
cluster_id=filters['cluster_id']
self._raise_404_if_cluster_deleted(req, cluster_id)
try:
roles = registry.get_roles_detail(req.context, **params)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return dict(roles=roles)
@utils.mutating
def update_role(self, req, id, role_meta):
"""
Updates an existing role with the registry.
:param request: The WSGI/Webob Request object
:param id: The opaque role identifier
:retval Returns the updated role information as a mapping
"""
orig_role_meta = self.get_role_meta_or_404(req, id)
role_service_list = registry.get_role_services(req.context, id)
role_service_id_list = [ role_service['service_id'] for role_service in role_service_list ]
role_host_info_list = registry.get_role_host_metadata(req.context, id)
role_host_id_list = [role_host['host_id'] for role_host in role_host_info_list]
self._check_role_update_parameters(req, role_meta, orig_role_meta, role_service_id_list, role_host_id_list)
self._enforce(req, 'modify_image')
#orig_role_meta = self.get_role_meta_or_404(req, id)
# Do not allow any updates on a deleted image.
# Fix for LP Bug #1060930
if orig_role_meta['deleted']:
msg = _("Forbidden to update deleted role.")
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
try:
role_meta = registry.update_role_metadata(req.context,
id,
role_meta)
except exception.Invalid as e:
msg = (_("Failed to update role metadata. Got error: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
except exception.NotFound as e:
msg = (_("Failed to find role to update: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPNotFound(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden to update role: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except (exception.Conflict, exception.Duplicate) as e:
LOG.warn(utils.exception_to_str(e))
raise HTTPConflict(body=_('Role operation conflicts'),
request=req,
content_type='text/plain')
else:
self.notifier.info('role.update', role_meta)
return {'role_meta': role_meta}
class RoleDeserializer(wsgi.JSONRequestDeserializer):
"""Handles deserialization of specific controller method requests."""
def _deserialize(self, request):
result = {}
result["role_meta"] = utils.get_role_meta(request)
return result
def add_role(self, request):
return self._deserialize(request)
def update_role(self, request):
return self._deserialize(request)
class RoleSerializer(wsgi.JSONResponseSerializer):
"""Handles serialization of specific controller method responses."""
def __init__(self):
self.notifier = notifier.Notifier()
def add_role(self, response, result):
role_meta = result['role_meta']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(role=role_meta))
return response
def delete_role(self, response, result):
role_meta = result['role_meta']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(role=role_meta))
return response
def get_role(self, response, result):
role_meta = result['role_meta']
response.status = 200
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(role=role_meta))
return response
def create_resource():
"""Roles resource factory method"""
deserializer = RoleDeserializer()
serializer = RoleSerializer()
return wsgi.Resource(Controller(), deserializer, serializer)
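
A hedged sketch of the volume-size arithmetic in _check_host_validity above:
each LV size is rounded down to a multiple of 4 MB, and the rounded sizes
must fit within the host's usable disk space; a disk size of 0 skips the
check entirely. The function name and sample values are illustrative; the
defaults mirror the ones used in the controller.

def lv_sizes_fit(disk_size_m, db_lv_size_m=300, glance_lv_size_m=17100,
                 nova_lv_size_m=0):
    if disk_size_m == 0:    # size 0 means no OS install, nothing to check
        return True
    round4 = lambda m: (m // 4) * 4    # round down to a 4 MB multiple
    return (round4(db_lv_size_m) + round4(glance_lv_size_m) +
            round4(nova_lv_size_m)) <= disk_size_m

print lv_sizes_fit(51200)    # True: the defaults fit in a 50 GB disk
print lv_sizes_fit(16384)    # False: glance alone needs 17100 MB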

code/daisy/daisy/api/v1/router.py Executable file

@ -0,0 +1,574 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#from daisy.api.v1 import images
from daisy.api.v1 import hosts
from daisy.api.v1 import clusters
from daisy.api.v1 import template
from daisy.api.v1 import components
from daisy.api.v1 import services
from daisy.api.v1 import roles
from daisy.api.v1 import members
from daisy.api.v1 import config_files
from daisy.api.v1 import config_sets
from daisy.api.v1 import configs
from daisy.api.v1 import networks
from daisy.api.v1 import install
from daisy.api.v1 import disk_array
from daisy.api.v1 import host_template
from daisy.common import wsgi
class API(wsgi.Router):
"""WSGI router for Glance v1 API requests."""
def __init__(self, mapper):
reject_method_resource = wsgi.Resource(wsgi.RejectMethodController())
'''images_resource = images.create_resource()
mapper.connect("/",
controller=images_resource,
action="index")
mapper.connect("/images",
controller=images_resource,
action='index',
conditions={'method': ['GET']})
mapper.connect("/images",
controller=images_resource,
action='create',
conditions={'method': ['POST']})
mapper.connect("/images",
controller=reject_method_resource,
action='reject',
allowed_methods='GET, POST',
conditions={'method': ['PUT', 'DELETE', 'HEAD',
'PATCH']})
mapper.connect("/images/detail",
controller=images_resource,
action='detail',
conditions={'method': ['GET', 'HEAD']})
mapper.connect("/images/detail",
controller=reject_method_resource,
action='reject',
allowed_methods='GET, HEAD',
conditions={'method': ['POST', 'PUT', 'DELETE',
'PATCH']})
mapper.connect("/images/{id}",
controller=images_resource,
action="meta",
conditions=dict(method=["HEAD"]))
mapper.connect("/images/{id}",
controller=images_resource,
action="show",
conditions=dict(method=["GET"]))
mapper.connect("/images/{id}",
controller=images_resource,
action="update",
conditions=dict(method=["PUT"]))
mapper.connect("/images/{id}",
controller=images_resource,
action="delete",
conditions=dict(method=["DELETE"]))
mapper.connect("/images/{id}",
controller=reject_method_resource,
action='reject',
allowed_methods='GET, HEAD, PUT, DELETE',
conditions={'method': ['POST', 'PATCH']})
members_resource = members.create_resource()
mapper.connect("/images/{image_id}/members",
controller=members_resource,
action="index",
conditions={'method': ['GET']})
mapper.connect("/images/{image_id}/members",
controller=members_resource,
action="update_all",
conditions=dict(method=["PUT"]))
mapper.connect("/images/{image_id}/members",
controller=reject_method_resource,
action='reject',
allowed_methods='GET, PUT',
conditions={'method': ['POST', 'DELETE', 'HEAD',
'PATCH']})
mapper.connect("/images/{image_id}/members/{id}",
controller=members_resource,
action="show",
conditions={'method': ['GET']})
mapper.connect("/images/{image_id}/members/{id}",
controller=members_resource,
action="update",
conditions={'method': ['PUT']})
mapper.connect("/images/{image_id}/members/{id}",
controller=members_resource,
action="delete",
conditions={'method': ['DELETE']})
mapper.connect("/images/{image_id}/members/{id}",
controller=reject_method_resource,
action='reject',
allowed_methods='GET, PUT, DELETE',
conditions={'method': ['POST', 'HEAD', 'PATCH']})
mapper.connect("/shared-images/{id}",
controller=members_resource,
action="index_shared_images")'''
hosts_resource = hosts.create_resource()
mapper.connect("/nodes",
controller=hosts_resource,
action='add_host',
conditions={'method': ['POST']})
mapper.connect("/nodes/{id}",
controller=hosts_resource,
action='delete_host',
conditions={'method': ['DELETE']})
mapper.connect("/nodes/{id}",
controller=hosts_resource,
action='update_host',
conditions={'method': ['PUT']})
mapper.connect("/nodes",
controller=hosts_resource,
action='detail',
conditions={'method': ['GET']})
mapper.connect("/nodes/{id}",
controller=hosts_resource,
action='get_host',
conditions={'method': ['GET']})
mapper.connect("/discover_host/",
controller=hosts_resource,
action='discover_host',
conditions={'method': ['POST']})
mapper.connect("/discover/nodes",
controller=hosts_resource,
action='add_discover_host',
conditions={'method': ['POST']})
mapper.connect("/discover/nodes/{id}",
controller=hosts_resource,
action='delete_discover_host',
conditions={'method': ['DELETE']})
mapper.connect("/discover/nodes",
controller=hosts_resource,
action='detail_discover_host',
conditions={'method': ['GET']})
mapper.connect("/discover/nodes/{id}",
controller=hosts_resource,
action='update_discover_host',
conditions={'method': ['PUT']})
mapper.connect("/discover/nodes/{discover_host_id}",
controller=hosts_resource,
action='get_discover_host_detail',
conditions={'method': ['GET']})
clusters_resource = clusters.create_resource()
mapper.connect("/clusters",
controller=clusters_resource,
action='add_cluster',
conditions={'method': ['POST']})
mapper.connect("/clusters/{id}",
controller=clusters_resource,
action='delete_cluster',
conditions={'method': ['DELETE']})
mapper.connect("/clusters/{id}",
controller=clusters_resource,
action='update_cluster',
conditions={'method': ['PUT']})
mapper.connect("/clusters",
controller=clusters_resource,
action='detail',
conditions={'method': ['GET']})
mapper.connect("/clusters/{id}",
controller=clusters_resource,
action='get_cluster',
conditions={'method': ['GET']})
mapper.connect("/clusters/{id}",
controller=clusters_resource,
action='update_cluster',
conditions={'method': ['PUT']})
template_resource = template.create_resource()
mapper.connect("/template",
controller=template_resource,
action='add_template',
conditions={'method': ['POST']})
mapper.connect("/template/{template_id}",
controller=template_resource,
action='update_template',
conditions={'method': ['PUT']})
mapper.connect("/template/{template_id}",
controller=template_resource,
action='delete_template',
conditions={'method': ['DELETE']})
mapper.connect("/template/lists",
controller=template_resource,
action='get_template_lists',
conditions={'method': ['GET']})
mapper.connect("/template/{template_id}",
controller=template_resource,
action='get_template_detail',
conditions={'method': ['GET']})
mapper.connect("/export_db_to_json",
controller=template_resource,
action='export_db_to_json',
conditions={'method': ['POST']})
mapper.connect("/import_json_to_template",
controller=template_resource,
action='import_json_to_template',
conditions={'method': ['POST']})
mapper.connect("/import_template_to_db",
controller=template_resource,
action='import_template_to_db',
conditions={'method': ['POST']})
host_template_resource = host_template.create_resource()
mapper.connect("/host_template",
controller=host_template_resource,
action='add_host_template',
conditions={'method': ['POST']})
mapper.connect("/host_template/{template_id}",
controller=host_template_resource,
action='update_host_template',
conditions={'method': ['PUT']})
mapper.connect("/host_template",
controller=host_template_resource,
action='delete_host_template',
conditions={'method': ['PUT']})
mapper.connect("/host_template/lists",
controller=host_template_resource,
action='get_host_template_lists',
conditions={'method': ['GET']})
mapper.connect("/host_template/{template_id}",
controller=host_template_resource,
action='get_host_template_detail',
conditions={'method': ['GET']})
mapper.connect("/host_to_template",
controller=host_template_resource,
action='host_to_template',
conditions={'method': ['POST']})
mapper.connect("/template_to_host",
controller=host_template_resource,
action='template_to_host',
conditions={'method': ['PUT']})
components_resource = components.create_resource()
mapper.connect("/components",
controller=components_resource,
action='add_component',
conditions={'method': ['POST']})
mapper.connect("/components/{id}",
controller=components_resource,
action='delete_component',
conditions={'method': ['DELETE']})
mapper.connect("/components/detail",
controller=components_resource,
action='detail',
conditions={'method': ['GET']})
mapper.connect("/components/{id}",
controller=components_resource,
action='get_component',
conditions={'method': ['GET']})
mapper.connect("/components/{id}",
controller=components_resource,
action='update_component',
conditions={'method': ['PUT']})
services_resource = services.create_resource()
mapper.connect("/services",
controller=services_resource,
action='add_service',
conditions={'method': ['POST']})
mapper.connect("/services/{id}",
controller=services_resource,
action='delete_service',
conditions={'method': ['DELETE']})
mapper.connect("/services/detail",
controller=services_resource,
action='detail',
conditions={'method': ['GET']})
mapper.connect("/services/{id}",
controller=services_resource,
action='get_service',
conditions={'method': ['GET']})
mapper.connect("/services/{id}",
controller=services_resource,
action='update_service',
conditions={'method': ['PUT']})
roles_resource = roles.create_resource()
mapper.connect("/roles",
controller=roles_resource,
action='add_role',
conditions={'method': ['POST']})
mapper.connect("/roles/{id}",
controller=roles_resource,
action='delete_role',
conditions={'method': ['DELETE']})
mapper.connect("/roles/detail",
controller=roles_resource,
action='detail',
conditions={'method': ['GET']})
mapper.connect("/roles/{id}",
controller=roles_resource,
action='get_role',
conditions={'method': ['GET']})
mapper.connect("/roles/{id}",
controller=roles_resource,
action='update_role',
conditions={'method': ['PUT']})
members_resource = members.create_resource()
mapper.connect("/clusters/{cluster_id}/nodes/{host_id}",
controller=members_resource,
action="add_cluster_host",
conditions={'method': ['PUT']})
mapper.connect("/clusters/{cluster_id}/nodes/{host_id}",
controller=members_resource,
action="delete_cluster_host",
conditions={'method': ['DELETE']})
# mapper.connect("/clusters/{cluster_id}/nodes/{host_id}",
# controller=members_resource,
# action="get_cluster_hosts",
# conditions={'method': ['GET']})
# mapper.connect("/clusters/{cluster_id}/nodes",
# controller=members_resource,
# action="get_cluster_hosts",
# conditions={'method': ['GET']})
# mapper.connect("/multi_clusters/nodes/{host_id}",
# controller=members_resource,
# action="get_host_clusters",
# conditions={'method': ['GET']})
config_files_resource = config_files.create_resource()
mapper.connect("/config_files",
controller=config_files_resource,
action="add_config_file",
conditions={'method': ['POST']})
mapper.connect("/config_files/{id}",
controller=config_files_resource,
action="delete_config_file",
conditions={'method': ['DELETE']})
mapper.connect("/config_files/{id}",
controller=config_files_resource,
action="update_config_file",
conditions={'method': ['PUT']})
mapper.connect("/config_files/detail",
controller=config_files_resource,
action="detail",
conditions={'method': ['GET']})
mapper.connect("/config_files/{id}",
controller=config_files_resource,
action="get_config_file",
conditions=dict(method=["GET"]))
config_sets_resource = config_sets.create_resource()
mapper.connect("/config_sets",
controller=config_sets_resource,
action="add_config_set",
conditions={'method': ['POST']})
mapper.connect("/config_sets/{id}",
controller=config_sets_resource,
action="delete_config_set",
conditions={'method': ['DELETE']})
mapper.connect("/config_sets/{id}",
controller=config_sets_resource,
action="update_config_set",
conditions={'method': ['PUT']})
mapper.connect("/config_sets/detail",
controller=config_sets_resource,
action="detail",
conditions={'method': ['GET']})
mapper.connect("/config_sets/{id}",
controller=config_sets_resource,
action="get_config_set",
conditions=dict(method=["GET"]))
mapper.connect("/cluster_config_set_update",
controller=config_sets_resource,
action="cluster_config_set_update",
conditions={'method': ['POST']})
mapper.connect("/cluster_config_set_progress",
controller=config_sets_resource,
action="cluster_config_set_progress",
conditions={'method': ['POST']})
configs_resource = configs.create_resource()
mapper.connect("/configs",
controller=configs_resource,
action="add_config",
conditions={'method': ['POST']})
mapper.connect("/configs_delete",
controller=configs_resource,
action="delete_config",
conditions={'method': ['DELETE']})
mapper.connect("/configs/detail",
controller=configs_resource,
action="detail",
conditions={'method': ['GET']})
mapper.connect("/configs/{id}",
controller=configs_resource,
action="get_config",
conditions=dict(method=["GET"]))
networks_resource = networks.create_resource()
mapper.connect("/networks",
controller=networks_resource,
action='add_network',
conditions={'method': ['POST']})
mapper.connect("/networks/{network_id}",
controller=networks_resource,
action='delete_network',
conditions={'method': ['DELETE']})
mapper.connect("/networks/{network_id}",
controller=networks_resource,
action='update_network',
conditions={'method': ['PUT']})
mapper.connect("/clusters/{id}/networks",
controller=networks_resource,
action='detail',
conditions={'method': ['GET']})
mapper.connect("/networks/{id}",
controller=networks_resource,
action='get_network',
conditions={'method': ['GET']})
mapper.connect("/networks",
controller=networks_resource,
action='get_all_network',
conditions={'method': ['GET']})
install_resource = install.create_resource()
mapper.connect("/install",
controller=install_resource,
action='install_cluster',
conditions={'method': ['POST']})
mapper.connect("/export_db",
controller=install_resource,
action='export_db',
conditions={'method': ['POST']})
mapper.connect("/uninstall/{cluster_id}",
controller=install_resource,
action='uninstall_cluster',
conditions={'method': ['POST']})
mapper.connect("/uninstall/{cluster_id}",
controller=install_resource,
action='uninstall_progress',
conditions={'method': ['GET']})
mapper.connect("/update/{cluster_id}",
controller=install_resource,
action='update_cluster',
conditions={'method': ['POST']})
mapper.connect("/update/{cluster_id}",
controller=install_resource,
action='update_progress',
conditions={'method': ['GET']})
mapper.connect("/disk_array/{cluster_id}",
controller=install_resource,
action='update_disk_array',
conditions={'method': ['POST']})
#mapper.connect("/update/{cluster_id}/versions/{versions_id}",
# controller=update_resource,
# action='update_cluster_version',
# conditions={'method': ['POST']})
array_resource = disk_array.create_resource()
mapper.connect("/service_disk",
controller=array_resource,
action='service_disk_add',
conditions={'method': ['POST']})
mapper.connect("/service_disk/{id}",
controller=array_resource,
action='service_disk_delete',
conditions={'method': ['DELETE']})
mapper.connect("/service_disk/{id}",
controller=array_resource,
action='service_disk_update',
conditions={'method': ['PUT']})
mapper.connect("/service_disk/list",
controller=array_resource,
action='service_disk_list',
conditions={'method': ['GET']})
mapper.connect("/service_disk/{id}",
controller=array_resource,
action='service_disk_detail',
conditions={'method': ['GET']})
mapper.connect("/cinder_volume",
controller=array_resource,
action='cinder_volume_add',
conditions={'method': ['POST']})
mapper.connect("/cinder_volume/{id}",
controller=array_resource,
action='cinder_volume_delete',
conditions={'method': ['DELETE']})
mapper.connect("/cinder_volume/{id}",
controller=array_resource,
action='cinder_volume_update',
conditions={'method': ['PUT']})
mapper.connect("/cinder_volume/list",
controller=array_resource,
action='cinder_volume_list',
conditions={'method': ['GET']})
mapper.connect("/cinder_volume/{id}",
controller=array_resource,
action='cinder_volume_detail',
conditions={'method': ['GET']})
super(API, self).__init__(mapper)
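
A small standalone illustration, assuming daisy.common.wsgi.Router wraps a
routes.Mapper the way Glance's router does: once the routes above are
connected, a request such as GET /clusters/123/networks resolves to the
'detail' action of the networks resource, with the cluster id taken from the
path. The same resolution with the plain routes library:

import routes

mapper = routes.Mapper()
mapper.connect("/clusters/{id}/networks",
               controller="networks", action="detail",
               conditions={'method': ['GET']})

# conditions={'method': [...]} requires REQUEST_METHOD in the environ
match = mapper.match("/clusters/123/networks",
                     environ={'REQUEST_METHOD': 'GET'})
print match   # {'controller': 'networks', 'action': 'detail', 'id': '123'}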

code/daisy/daisy/api/v1/services.py Executable file

@ -0,0 +1,334 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/services endpoint for Daisy v1 API
"""
from oslo_config import cfg
from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPConflict
from webob.exc import HTTPForbidden
from webob.exc import HTTPNotFound
from webob import Response
from daisy.api import policy
import daisy.api.v1
from daisy.api.v1 import controller
from daisy.api.v1 import filters
from daisy.common import exception
from daisy.common import property_utils
from daisy.common import utils
from daisy.common import wsgi
from daisy import i18n
from daisy import notifier
import daisy.registry.client.v1.api as registry
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS
SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS
ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE
CONF = cfg.CONF
CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format')
CONF.import_opt('container_formats', 'daisy.common.config',
group='image_format')
CONF.import_opt('image_property_quota', 'daisy.common.config')
class Controller(controller.BaseController):
"""
WSGI controller for services resource in Daisy v1 API
The services resource API is a RESTful web service for service data. The API
is as follows::
GET /services -- Returns a set of brief metadata about services
GET /services/detail -- Returns a set of detailed metadata about
services
HEAD /services/<ID> -- Return metadata about a service with id <ID>
GET /services/<ID> -- Return service data for service with id <ID>
POST /services -- Store service data and return metadata about the
newly-stored service
PUT /services/<ID> -- Update service metadata and/or upload service
data for a previously-reserved service
DELETE /services/<ID> -- Delete the service with id <ID>
"""
def __init__(self):
self.notifier = notifier.Notifier()
registry.configure_registry_client()
self.policy = policy.Enforcer()
if property_utils.is_property_protection_enabled():
self.prop_enforcer = property_utils.PropertyRules(self.policy)
else:
self.prop_enforcer = None
def _enforce(self, req, action, target=None):
"""Authorize an action against our policies"""
if target is None:
target = {}
try:
self.policy.enforce(req.context, action, target)
except exception.Forbidden:
raise HTTPForbidden()
def _get_filters(self, req):
"""
Return a dictionary of query param filters from the request
:param req: the Request object coming from the wsgi layer
:retval a dict of key/value filters
"""
query_filters = {}
for param in req.params:
if param in SUPPORTED_FILTERS:
query_filters[param] = req.params.get(param)
if not filters.validate(param, query_filters[param]):
raise HTTPBadRequest(_('Bad value passed to filter '
'%(filter)s got %(val)s')
% {'filter': param,
'val': query_filters[param]})
return query_filters
def _get_query_params(self, req):
"""
Extracts necessary query params from request.
:param req: the WSGI Request object
:retval dict of parameters that can be used by registry client
"""
params = {'filters': self._get_filters(req)}
for PARAM in SUPPORTED_PARAMS:
if PARAM in req.params:
params[PARAM] = req.params.get(PARAM)
return params
def _raise_404_if_component_deleted(self, req, component_id):
component = self.get_component_meta_or_404(req, component_id)
if component['deleted']:
msg = _("Component with identifier %s has been deleted.") % component_id
raise HTTPNotFound(msg)
@utils.mutating
def add_service(self, req, service_meta):
"""
Adds a new service to Daisy.
:param req: The WSGI/Webob Request object
:param service_meta: Mapping of metadata about the service
:raises HTTPBadRequest if x-service-name is missing
"""
self._enforce(req, 'add_service')
service_name = service_meta["name"]
service_description = service_meta["description"]
if 'component_id' in service_meta:
orig_component_id = str(service_meta['component_id'])
self._raise_404_if_component_deleted(req, orig_component_id)
LOG.debug("add_service: name=%s description=%s",
service_name, service_description)
service_meta = registry.add_service_metadata(req.context, service_meta)
return {'service_meta': service_meta}
@utils.mutating
def delete_service(self, req, id):
"""
Deletes a service from Daisy.
:param req: The WSGI/Webob Request object
:param id: The opaque service identifier
:raises HTTPNotFound if the service does not exist
"""
self._enforce(req, 'delete_service')
#service = self.get_service_meta_or_404(req, id)
print "delete_service:%s" % id
try:
registry.delete_service_metadata(req.context, id)
except exception.NotFound as e:
msg = (_("Failed to find service to delete: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPNotFound(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden to delete service: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except exception.InUseByStore as e:
msg = (_("service %(id)s could not be deleted because it is in use: "
"%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)})
LOG.warn(msg)
raise HTTPConflict(explanation=msg,
request=req,
content_type="text/plain")
else:
#self.notifier.info('service.delete', service)
return Response(body='', status=200)
@utils.mutating
def get_service(self, req, id):
"""
Returns metadata about a service in the HTTP headers of the
response object
:param req: The WSGI/Webob Request object
:param id: The opaque service identifier
:raises HTTPNotFound if service metadata is not available to user
"""
self._enforce(req, 'get_service')
service_meta = self.get_service_meta_or_404(req, id)
return {'service_meta': service_meta}
def detail(self, req):
"""
Returns detailed information for all available services
:param req: The WSGI/Webob Request object
:retval The response body is a mapping of the following form::
{'services': [
{'id': <ID>,
'name': <NAME>,
'description': <DESCRIPTION>,
'created_at': <TIMESTAMP>,
'updated_at': <TIMESTAMP>,
'deleted_at': <TIMESTAMP>|<NONE>,}, ...
]}
"""
self._enforce(req, 'get_services')
params = self._get_query_params(req)
try:
services = registry.get_services_detail(req.context, **params)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return dict(services=services)
@utils.mutating
def update_service(self, req, id, service_meta):
"""
Updates an existing service with the registry.
:param request: The WSGI/Webob Request object
:param id: The opaque image identifier
:retval Returns the updated image information as a mapping
"""
self._enforce(req, 'modify_image')
orig_service_meta = self.get_service_meta_or_404(req, id)
# Do not allow any updates on a deleted image.
# Fix for LP Bug #1060930
if orig_service_meta['deleted']:
msg = _("Forbidden to update deleted service.")
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
try:
service_meta = registry.update_service_metadata(req.context,
id,
service_meta)
except exception.Invalid as e:
msg = (_("Failed to update service metadata. Got error: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
except exception.NotFound as e:
msg = (_("Failed to find service to update: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPNotFound(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden to update service: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except (exception.Conflict, exception.Duplicate) as e:
LOG.warn(utils.exception_to_str(e))
raise HTTPConflict(body=_('Host operation conflicts'),
request=req,
content_type='text/plain')
else:
self.notifier.info('service.update', service_meta)
return {'service_meta': service_meta}
class ServiceDeserializer(wsgi.JSONRequestDeserializer):
"""Handles deserialization of specific controller method requests."""
def _deserialize(self, request):
result = {}
result["service_meta"] = utils.get_service_meta(request)
return result
def add_service(self, request):
return self._deserialize(request)
def update_service(self, request):
return self._deserialize(request)
class ServiceSerializer(wsgi.JSONResponseSerializer):
"""Handles serialization of specific controller method responses."""
def __init__(self):
self.notifier = notifier.Notifier()
def add_service(self, response, result):
service_meta = result['service_meta']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(service=service_meta))
return response
def delete_service(self, response, result):
service_meta = result['service_meta']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(service=service_meta))
return response
def get_service(self, response, result):
service_meta = result['service_meta']
response.status = 200
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(service=service_meta))
return response
def create_resource():
"""Services resource factory method"""
deserializer = ServiceDeserializer()
serializer = ServiceSerializer()
return wsgi.Resource(Controller(), deserializer, serializer)
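
The filter handling in this controller (_get_filters/_get_query_params) simply picks whitelisted keys out of the query string. A self-contained sketch of that extraction using webob directly; the SUPPORTED_FILTERS and SUPPORTED_PARAMS values here are assumed for illustration (the real lists live in daisy.api.v1):

from webob import Request

SUPPORTED_FILTERS = ['name', 'status']
SUPPORTED_PARAMS = ['limit', 'marker', 'sort_key', 'sort_dir']

req = Request.blank('/services/detail?name=lb&limit=10&bogus=1')
filters = {k: v for k, v in req.params.items() if k in SUPPORTED_FILTERS}
params = {'filters': filters}
params.update({k: req.params[k] for k in SUPPORTED_PARAMS if k in req.params})
print(params)  # unknown keys such as 'bogus' are dropped silently
# -> {'filters': {'name': 'lb'}, 'limit': '10'}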


@ -0,0 +1,629 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/Templates endpoint for Daisy v1 API
"""
from oslo_config import cfg
from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPConflict
from webob.exc import HTTPForbidden
from webob.exc import HTTPNotFound
from webob import Response
import copy
from daisy.api import policy
import daisy.api.v1
from daisy.api.v1 import controller
from daisy.api.v1 import filters
from daisy.common import exception
from daisy.common import property_utils
from daisy.common import utils
from daisy.common import wsgi
from daisy import i18n
from daisy import notifier
import daisy.registry.client.v1.api as registry
from daisy.registry.api.v1 import template
import daisy.api.backends.tecs.common as tecs_cmn
import daisy.api.backends.common as daisy_cmn
try:
import simplejson as json
except ImportError:
import json
daisy_tecs_path = tecs_cmn.daisy_tecs_path
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
SUPPORTED_PARAMS = template.SUPPORTED_PARAMS
SUPPORTED_FILTERS = template.SUPPORTED_FILTERS
ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE
CONF = cfg.CONF
CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format')
CONF.import_opt('container_formats', 'daisy.common.config',
group='image_format')
CONF.import_opt('image_property_quota', 'daisy.common.config')
class Controller(controller.BaseController):
"""
WSGI controller for Templates resource in Daisy v1 API
The Templates resource API is a RESTful web service for template data. The API
is as follows::
GET /Templates -- Returns a set of brief metadata about Templates
GET /Templates/detail -- Returns a set of detailed metadata about
Templates
HEAD /Templates/<ID> -- Return metadata about a Template with id <ID>
GET /Templates/<ID> -- Return Template data for Template with id <ID>
POST /Templates -- Store Template data and return metadata about the
newly-stored Template
PUT /Templates/<ID> -- Update Template metadata and/or upload Template
data for a previously-reserved Template
DELETE /Templates/<ID> -- Delete the Template with id <ID>
"""
def __init__(self):
self.notifier = notifier.Notifier()
registry.configure_registry_client()
self.policy = policy.Enforcer()
if property_utils.is_property_protection_enabled():
self.prop_enforcer = property_utils.PropertyRules(self.policy)
else:
self.prop_enforcer = None
def _enforce(self, req, action, target=None):
"""Authorize an action against our policies"""
if target is None:
target = {}
try:
self.policy.enforce(req.context, action, target)
except exception.Forbidden:
raise HTTPForbidden()
def _get_filters(self, req):
"""
Return a dictionary of query param filters from the request
:param req: the Request object coming from the wsgi layer
:retval a dict of key/value filters
"""
query_filters = {}
for param in req.params:
if param in SUPPORTED_FILTERS:
query_filters[param] = req.params.get(param)
if not filters.validate(param, query_filters[param]):
raise HTTPBadRequest(_('Bad value passed to filter '
'%(filter)s got %(val)s')
% {'filter': param,
'val': query_filters[param]})
return query_filters
def _get_query_params(self, req):
"""
Extracts necessary query params from request.
:param req: the WSGI Request object
:retval dict of parameters that can be used by registry client
"""
params = {'filters': self._get_filters(req)}
for PARAM in SUPPORTED_PARAMS:
if PARAM in req.params:
params[PARAM] = req.params.get(PARAM)
return params
def _raise_404_if_cluster_deleted(self, req, cluster_id):
cluster = self.get_cluster_meta_or_404(req, cluster_id)
if cluster['deleted']:
msg = _("Cluster with identifier %s has been deleted.") % cluster_id
raise HTTPNotFound(msg)
@utils.mutating
def add_template(self, req, template):
"""
Adds a new cluster template to Daisy.
:param req: The WSGI/Webob Request object
:param template: Mapping of metadata about the template
:raises HTTPBadRequest if x-Template-name is missing
"""
self._enforce(req, 'add_template')
template_name = template["name"]
template = registry.add_template_metadata(req.context, template)
return {'template': template}
@utils.mutating
def update_template(self, req, template_id, template):
"""
Updates an existing Template with the registry.
:param request: The WSGI/Webob Request object
:param template_id: The opaque template identifier
:retval Returns the updated template information as a mapping
"""
self._enforce(req, 'update_template')
try:
template = registry.update_template_metadata(req.context,
template_id,
template)
except exception.Invalid as e:
msg = (_("Failed to update template metadata. Got error: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
except exception.NotFound as e:
msg = (_("Failed to find template to update: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPNotFound(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden to update template: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except (exception.Conflict, exception.Duplicate) as e:
LOG.warn(utils.exception_to_str(e))
raise HTTPConflict(body=_('template operation conflicts'),
request=req,
content_type='text/plain')
else:
self.notifier.info('template.update', template)
return {'template': template}
@utils.mutating
def delete_template(self, req, template_id):
"""
Delete an existing cluster template from the registry.
:param request: The WSGI/Webob Request object
:param template_id: The opaque template identifier
:retval Returns an empty response body on success
"""
self._enforce(req, 'delete_template')
try:
registry.delete_template_metadata(req.context, template_id)
except exception.NotFound as e:
msg = (_("Failed to find template to delete: %s") %
utils.exception_to_str(e))
LOG.error(msg)
raise HTTPNotFound(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden to delete template: %s") %
utils.exception_to_str(e))
LOG.error(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except exception.InUseByStore as e:
msg = (_("template %(id)s could not be deleted because it is in use: "
"%(exc)s") % {"id": template_id, "exc": utils.exception_to_str(e)})
LOG.error(msg)
raise HTTPConflict(explanation=msg,
request=req,
content_type="text/plain")
else:
return Response(body='', status=200)
def _del_general_params(self,param):
del param['created_at']
del param['updated_at']
del param['deleted']
del param['deleted_at']
del param['id']
def _del_cluster_params(self,cluster):
del cluster['networks']
del cluster['vlan_start']
del cluster['vlan_end']
del cluster['vni_start']
del cluster['vni_end']
del cluster['gre_id_start']
del cluster['gre_id_end']
del cluster['net_l23_provider']
del cluster['public_vip']
del cluster['segmentation_type']
del cluster['base_mac']
del cluster['name']
@utils.mutating
def export_db_to_json(self, req, template):
"""
Export a cluster's configuration from the database to a JSON template file.
:param req: The WSGI/Webob Request object
:raises HTTPBadRequest if x-Template-cluster is missing
"""
cluster_name = template.get('cluster_name',None)
type = template.get('type',None)
description = template.get('description',None)
template_name = template.get('template_name',None)
self._enforce(req, 'export_db_to_json')
cinder_volume_list = []
template_content = {}
template_json = {}
template_id = ""
if not type or type == "tecs":
try:
params = {'filters': {'name':cluster_name}}
clusters = registry.get_clusters_detail(req.context, **params)
if clusters:
cluster_id = clusters[0]['id']
else:
msg = "the cluster %s is not exist"%cluster_name
LOG.error(msg)
raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain")
params = {'filters': {'cluster_id':cluster_id}}
cluster = registry.get_cluster_metadata(req.context, cluster_id)
roles = registry.get_roles_detail(req.context, **params)
networks = registry.get_networks_detail(req.context, cluster_id,**params)
for role in roles:
cinder_volume_params = {'filters': {'role_id':role['id']}}
cinder_volumes = registry.list_cinder_volume_metadata(req.context, **cinder_volume_params)
for cinder_volume in cinder_volumes:
if cinder_volume.get('role_id',None):
cinder_volume['role_id'] = role['name']
self._del_general_params(cinder_volume)
cinder_volume_list.append(cinder_volume)
if role.get('config_set_id',None):
config_set = registry.get_config_set_metadata(req.context, role['config_set_id'])
role['config_set_id'] = config_set['name']
del role['cluster_id']
del role['status']
del role['progress']
del role['messages']
del role['config_set_update_progress']
self._del_general_params(role)
for network in networks:
network_detail = registry.get_network_metadata(req.context, network['id'])
if network_detail.get('ip_ranges',None):
network['ip_ranges'] = network_detail['ip_ranges']
del network['cluster_id']
self._del_general_params(network)
if cluster.get('routers',None):
for router in cluster['routers']:
del router['cluster_id']
self._del_general_params(router)
if cluster.get('logic_networks',None):
for logic_network in cluster['logic_networks']:
for subnet in logic_network['subnets']:
del subnet['logic_network_id']
del subnet['router_id']
self._del_general_params(subnet)
del logic_network['cluster_id']
self._del_general_params(logic_network)
if cluster.get('nodes',None):
del cluster['nodes']
self._del_general_params(cluster)
self._del_cluster_params(cluster)
template_content['cluster'] = cluster
template_content['roles'] = roles
template_content['networks'] = networks
template_content['cinder_volumes'] = cinder_volume_list
template_json['content'] = json.dumps(template_content)
template_json['type'] = 'tecs'
template_json['name'] = template_name
template_json['description'] = description
template_host_params = {'cluster_name':cluster_name}
template_hosts = registry.host_template_lists_metadata(req.context, **template_host_params)
if template_hosts:
template_json['hosts'] = template_hosts[0]['hosts']
else:
template_json['hosts'] = "[]"
template_params = {'filters': {'name':template_name}}
template_list = registry.template_lists_metadata(req.context, **template_params)
if template_list:
update_template = registry.update_template_metadata(req.context, template_list[0]['id'], template_json)
template_id = template_list[0]['id']
else:
add_template = registry.add_template_metadata(req.context, template_json)
template_id = add_template['id']
if template_id:
template_detail = registry.template_detail_metadata(req.context, template_id)
self._del_general_params(template_detail)
template_detail['content'] = json.loads(template_detail['content'])
if template_detail['hosts']:
template_detail['hosts'] = json.loads(template_detail['hosts'])
tecs_json = daisy_tecs_path + "%s.json"%template_name
cmd = 'rm -rf %s' % (tecs_json,)
daisy_cmn.subprocess_call(cmd)
with open(tecs_json, "w+") as fp:
fp.write(json.dumps(template_detail))
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return {"template":template_detail}
@utils.mutating
def import_json_to_template(self, req, template):
template_id = ""
template = json.loads(template.get('template',None))
template_cluster = copy.deepcopy(template)
template_name = template_cluster.get('name',None)
template_params = {'filters': {'name':template_name}}
try:
if template_cluster.get('content',None):
template_cluster['content'] = json.dumps(template_cluster['content'])
if template_cluster.get('hosts',None):
template_cluster['hosts'] = json.dumps(template_cluster['hosts'])
else:
template_cluster['hosts'] = "[]"
template_list = registry.template_lists_metadata(req.context, **template_params)
if template_list:
update_template_cluster = registry.update_template_metadata(req.context, template_list[0]['id'], template_cluster)
template_id = template_list[0]['id']
else:
add_template_cluster = registry.add_template_metadata(req.context, template_cluster)
template_id = add_template_cluster['id']
if template_id:
template_detail = registry.template_detail_metadata(req.context, template_id)
del template_detail['deleted']
del template_detail['deleted_at']
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return {"template":template_detail}
@utils.mutating
def import_template_to_db(self, req, template):
cluster_id = ""
template_cluster = {}
cluster_meta = {}
template_meta = copy.deepcopy(template)
template_name = template_meta.get('name',None)
cluster_name = template_meta.get('cluster',None)
template_params = {'filters': {'name':template_name}}
template_list = registry.template_lists_metadata(req.context, **template_params)
if template_list:
template_cluster = template_list[0]
else:
msg = "the template %s is not exist" % template_name
LOG.error(msg)
raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain")
try:
template_content = json.loads(template_cluster['content'])
template_content_cluster = template_content['cluster']
template_content_cluster['name'] = cluster_name
template_content_cluster['networking_parameters'] = str(template_content_cluster['networking_parameters'])
template_content_cluster['logic_networks'] = str(template_content_cluster['logic_networks'])
template_content_cluster['logic_networks'] = template_content_cluster['logic_networks'].replace("\'true\'","True")
template_content_cluster['routers'] = str(template_content_cluster['routers'])
if template_cluster['hosts']:
template_hosts = json.loads(template_cluster['hosts'])
template_host_params = {'cluster_name':cluster_name}
template_host_list = registry.host_template_lists_metadata(req.context, **template_host_params)
if template_host_list:
update_template_meta = {"cluster_name": cluster_name, "hosts":json.dumps(template_hosts)}
registry.update_host_template_metadata(req.context, template_host_list[0]['id'], update_template_meta)
else:
template_meta = {"cluster_name": cluster_name, "hosts":json.dumps(template_hosts)}
registry.add_host_template_metadata(req.context, template_meta)
cluster_params = {'filters': {'name':cluster_name}}
clusters = registry.get_clusters_detail(req.context, **cluster_params)
if clusters:
msg = "the cluster %s is exist" % clusters[0]['name']
LOG.error(msg)
raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain")
else:
cluster_meta = registry.add_cluster_metadata(req.context, template_content['cluster'])
cluster_id = cluster_meta['id']
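# The three loops below follow an upsert pattern: template entities that
# already exist in the new cluster (matched by name) are updated in place,
# while missing ones are created under the new cluster_id.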
params = {'filters':{}}
networks = registry.get_networks_detail(req.context, cluster_id,**params)
template_content_networks = template_content['networks']
for template_content_network in template_content_networks:
template_content_network['ip_ranges'] = str(template_content_network['ip_ranges'])
network_exist = False
for network in networks:
if template_content_network['name'] == network['name']:
update_network_meta = registry.update_network_metadata(req.context, network['id'], template_content_network)
network_exist = True
if not network_exist:
template_content_network['cluster_id'] = cluster_id
add_network_meta = registry.add_network_metadata(req.context, template_content_network)
params = {'filters': {'cluster_id':cluster_id}}
roles = registry.get_roles_detail(req.context, **params)
template_content_roles = template_content['roles']
for template_content_role in template_content_roles:
role_exist = False
del template_content_role['config_set_id']
for role in roles:
if template_content_role['name'] == role['name']:
update_role_meta = registry.update_role_metadata(req.context, role['id'], template_content_role)
role_exist = True
if not role_exist:
template_content_role['cluster_id'] = cluster_id
add_role_meta = registry.add_role_metadata(req.context, template_content_role)
cinder_volumes = registry.list_cinder_volume_metadata(req.context, **params)
template_content_cinder_volumes = template_content['cinder_volumes']
for template_content_cinder_volume in template_content_cinder_volumes:
cinder_volume_exist = False
roles = registry.get_roles_detail(req.context, **params)
for role in roles:
if template_content_cinder_volume['role_id'] == role['name']:
template_content_cinder_volume['role_id'] = role['id']
for cinder_volume in cinder_volumes:
if template_content_cinder_volume['role_id'] == cinder_volume['role_id']:
update_cinder_volume_meta = registry.update_cinder_volume_metadata(req.context, cinder_volume['id'], template_content_cinder_volume)
cinder_volume_exist = True
if not cinder_volume_exist:
add_cinder_volumes = registry.add_cinder_volume_metadata(req.context, template_content_cinder_volume)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return {"template":cluster_meta}
@utils.mutating
def get_template_detail(self, req, template_id):
"""
Get detailed information for a cluster template from the registry.
:param request: The WSGI/Webob Request object
:param template_id: The opaque template identifier
:retval Returns the template information as a mapping
"""
self._enforce(req, 'get_template_detail')
try:
template = registry.template_detail_metadata(req.context, template_id)
return {'template': template}
except exception.NotFound as e:
msg = (_("Failed to find template: %s") %
utils.exception_to_str(e))
LOG.error(msg)
raise HTTPNotFound(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden to get template: %s") %
utils.exception_to_str(e))
LOG.error(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except exception.InUseByStore as e:
msg = (_("template %(id)s could not be get because it is in use: "
"%(exc)s") % {"id": template_id, "exc": utils.exception_to_str(e)})
LOG.error(msg)
raise HTTPConflict(explanation=msg,
request=req,
content_type="text/plain")
@utils.mutating
def get_template_lists(self, req):
self._enforce(req, 'get_template_lists')
params = self._get_query_params(req)
try:
template_lists = registry.template_lists_metadata(req.context, **params)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return dict(template=template_lists)
class TemplateDeserializer(wsgi.JSONRequestDeserializer):
"""Handles deserialization of specific controller method requests."""
def _deserialize(self, request):
result = {}
result["template"] = utils.get_template_meta(request)
return result
def add_template(self, request):
return self._deserialize(request)
def update_template(self, request):
return self._deserialize(request)
def export_db_to_json(self, request):
return self._deserialize(request)
def import_json_to_template(self, request):
return self._deserialize(request)
def import_template_to_db(self, request):
return self._deserialize(request)
class TemplateSerializer(wsgi.JSONResponseSerializer):
"""Handles serialization of specific controller method responses."""
def __init__(self):
self.notifier = notifier.Notifier()
def add_template(self, response, result):
template = result['template']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(template=template))
return response
def delete_template(self, response, result):
template = result['template']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(template=template))
return response
def get_template_detail(self, response, result):
template = result['template']
response.status = 200
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(template=template))
return response
def update_template(self, response, result):
template = result['template']
response.status = 200
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(template=template))
return response
def export_db_to_json(self, response, result):
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(result)
return response
def import_json_to_template(self, response, result):
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(result)
return response
def import_template_to_db(self, response, result):
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(result)
return response
def create_resource():
"""Templates resource factory method"""
deserializer = TemplateDeserializer()
serializer = TemplateSerializer()
return wsgi.Resource(Controller(), deserializer, serializer)
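
To make the export/import round trip concrete, this is the rough shape of the JSON file that export_db_to_json writes to daisy_tecs_path + "<template_name>.json". The top-level keys follow the code above; all field values here are invented:

import json

exported_template = {
    "name": "demo-template",      # template_name passed in the request
    "type": "tecs",
    "description": "exported from cluster 'demo'",
    "hosts": [],                  # host templates, "[]" when none exist
    "content": {                  # stored as a string, decoded on export
        "cluster": {},            # cluster meta minus per-deploy fields
        "roles": [],
        "networks": [],
        "cinder_volumes": [],
    },
}
print(json.dumps(exported_template, indent=2))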


@ -0,0 +1,289 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glance_store as store_api
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import webob.exc
from daisy.common import exception
from daisy.common import store_utils
from daisy.common import utils
import daisy.api.common
import daisy.db
from daisy import i18n
import daisy.registry.client.v1.api as registry
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
def initiate_deletion(req, location_data, id):
"""
Deletes image data from the location of backend store.
:param req: The WSGI/Webob Request object
:param location_data: Location to the image data in a data store
:param id: Opaque image identifier
"""
store_utils.delete_image_location_from_backend(req.context,
id, location_data)
def _kill(req, image_id, from_state):
"""
Marks the image status to `killed`.
:param req: The WSGI/Webob Request object
:param image_id: Opaque image identifier
:param from_state: Permitted current status for transition to 'killed'
"""
# TODO(dosaboy): http://docs.openstack.org/developer/glance/statuses.html
# needs updating to reflect the fact that queued->killed and saving->killed
# are both allowed.
registry.update_image_metadata(req.context, image_id,
{'status': 'killed'},
from_state=from_state)
def safe_kill(req, image_id, from_state):
"""
Mark image killed without raising exceptions if it fails.
Since _kill is meant to be called from exceptions handlers, it should
not raise itself, rather it should just log its error.
:param req: The WSGI/Webob Request object
:param image_id: Opaque image identifier
:param from_state: Permitted current status for transition to 'killed'
"""
try:
_kill(req, image_id, from_state)
except Exception:
LOG.exception(_LE("Unable to kill image %(id)s: ") % {'id': image_id})
def upload_data_to_store(req, image_meta, image_data, store, notifier):
"""
Upload image data to specified store.
Upload image data to the store and cleans up on error.
"""
image_id = image_meta['id']
db_api = daisy.db.get_api()
image_size = image_meta.get('size')
try:
# By default image_data will be passed as CooperativeReader object.
# But if 'user_storage_quota' is enabled and 'remaining' is not None
# then it will be passed as object of LimitingReader to
# 'store_add_to_backend' method.
image_data = utils.CooperativeReader(image_data)
remaining = daisy.api.common.check_quota(
req.context, image_size, db_api, image_id=image_id)
if remaining is not None:
image_data = utils.LimitingReader(image_data, remaining)
(uri,
size,
checksum,
location_metadata) = store_api.store_add_to_backend(
image_meta['id'],
image_data,
image_meta['size'],
store,
context=req.context)
location_data = {'url': uri,
'metadata': location_metadata,
'status': 'active'}
try:
# recheck the quota in case there were simultaneous uploads that
# did not provide the size
daisy.api.common.check_quota(
req.context, size, db_api, image_id=image_id)
except exception.StorageQuotaFull:
with excutils.save_and_reraise_exception():
LOG.info(_LI('Cleaning up %s after exceeding '
'the quota') % image_id)
store_utils.safe_delete_from_backend(
req.context, image_meta['id'], location_data)
def _kill_mismatched(image_meta, attr, actual):
supplied = image_meta.get(attr)
if supplied and supplied != actual:
msg = (_("Supplied %(attr)s (%(supplied)s) and "
"%(attr)s generated from uploaded image "
"(%(actual)s) did not match. Setting image "
"status to 'killed'.") % {'attr': attr,
'supplied': supplied,
'actual': actual})
LOG.error(msg)
safe_kill(req, image_id, 'saving')
initiate_deletion(req, location_data, image_id)
raise webob.exc.HTTPBadRequest(explanation=msg,
content_type="text/plain",
request=req)
# Verify any supplied size/checksum value matches size/checksum
# returned from store when adding image
_kill_mismatched(image_meta, 'size', size)
_kill_mismatched(image_meta, 'checksum', checksum)
# Update the database with the checksum returned
# from the backend store
LOG.debug("Updating image %(image_id)s data. "
"Checksum set to %(checksum)s, size set "
"to %(size)d", {'image_id': image_id,
'checksum': checksum,
'size': size})
update_data = {'checksum': checksum,
'size': size}
try:
try:
state = 'saving'
image_meta = registry.update_image_metadata(req.context,
image_id,
update_data,
from_state=state)
except exception.Duplicate:
image = registry.get_image_metadata(req.context, image_id)
if image['status'] == 'deleted':
raise exception.NotFound()
else:
raise
except exception.NotFound:
msg = _LI("Image %s could not be found after upload. The image may"
" have been deleted during the upload.") % image_id
LOG.info(msg)
# NOTE(jculp): we need to clean up the datastore if an image
# resource is deleted while the image data is being uploaded
#
# We get "location_data" from above call to store.add(), any
# exceptions that occur there handle this same issue internally,
# Since this is store-agnostic, should apply to all stores.
initiate_deletion(req, location_data, image_id)
raise webob.exc.HTTPPreconditionFailed(explanation=msg,
request=req,
content_type='text/plain')
except store_api.StoreAddDisabled:
msg = _("Error in store configuration. Adding images to store "
"is disabled.")
LOG.exception(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPGone(explanation=msg, request=req,
content_type='text/plain')
except exception.Duplicate as e:
msg = (_("Attempt to upload duplicate image: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
# NOTE(dosaboy): do not delete the image since it is likely that this
# conflict is a result of another concurrent upload that will be
# successful.
notifier.error('image.upload', msg)
raise webob.exc.HTTPConflict(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden upload attempt: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except store_api.StorageFull as e:
msg = (_("Image storage media is full: %s") %
utils.exception_to_str(e))
LOG.error(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
request=req,
content_type='text/plain')
except store_api.StorageWriteDenied as e:
msg = (_("Insufficient permissions on image storage media: %s") %
utils.exception_to_str(e))
LOG.error(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPServiceUnavailable(explanation=msg,
request=req,
content_type='text/plain')
except exception.ImageSizeLimitExceeded as e:
msg = (_("Denying attempt to upload image larger than %d bytes.")
% CONF.image_size_cap)
LOG.warn(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
request=req,
content_type='text/plain')
except exception.StorageQuotaFull as e:
msg = (_("Denying attempt to upload image because it exceeds the "
"quota: %s") % utils.exception_to_str(e))
LOG.warn(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
request=req,
content_type='text/plain')
except webob.exc.HTTPError:
# NOTE(bcwaldon): Ideally, we would just call 'raise' here,
# but something in the above function calls is affecting the
# exception context and we must explicitly re-raise the
# caught exception.
msg = _LE("Received HTTP error while uploading image %s") % image_id
notifier.error('image.upload', msg)
with excutils.save_and_reraise_exception():
LOG.exception(msg)
safe_kill(req, image_id, 'saving')
except (ValueError, IOError) as e:
msg = _("Client disconnected before sending all data to backend")
LOG.warn(msg)
safe_kill(req, image_id, 'saving')
raise webob.exc.HTTPBadRequest(explanation=msg,
content_type="text/plain",
request=req)
except Exception as e:
msg = _("Failed to upload image %s") % image_id
LOG.exception(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPInternalServerError(explanation=msg,
request=req,
content_type='text/plain')
return image_meta, location_data
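
The _kill_mismatched closure above is a trust-but-verify rule: any size or checksum the client supplied must match what the backend store measured while writing the image. A standalone sketch of the same rule (the helper name is mine; md5 mirrors the checksum the store returns):

import hashlib

def verify_supplied_meta(image_meta, data):
    """Raise if client-supplied size/checksum disagree with the real data."""
    actual = {'size': len(data), 'checksum': hashlib.md5(data).hexdigest()}
    for attr, actual_val in actual.items():
        supplied = image_meta.get(attr)
        if supplied and supplied != actual_val:
            raise ValueError("supplied %s (%s) does not match actual (%s)"
                             % (attr, supplied, actual_val))
    return actual

print(verify_supplied_meta({'size': 11, 'checksum': None}, b'hello world'))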


@ -0,0 +1,89 @@
# Copyright 2015 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glance_store
from oslo_log import log as logging
import webob.exc
from daisy.api import policy
from daisy.common import exception
from daisy.common import utils
from daisy.common import wsgi
import daisy.db
import daisy.gateway
from daisy import i18n
import daisy.notifier
LOG = logging.getLogger(__name__)
_ = i18n._
_LI = i18n._LI
class ImageActionsController(object):
def __init__(self, db_api=None, policy_enforcer=None, notifier=None,
store_api=None):
self.db_api = db_api or daisy.db.get_api()
self.policy = policy_enforcer or policy.Enforcer()
self.notifier = notifier or daisy.notifier.Notifier()
self.store_api = store_api or glance_store
self.gateway = daisy.gateway.Gateway(self.db_api, self.store_api,
self.notifier, self.policy)
@utils.mutating
def deactivate(self, req, image_id):
image_repo = self.gateway.get_repo(req.context)
try:
image = image_repo.get(image_id)
image.deactivate()
image_repo.save(image)
LOG.info(_LI("Image %s is deactivated") % image_id)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.msg)
except exception.Forbidden as e:
raise webob.exc.HTTPForbidden(explanation=e.msg)
except exception.InvalidImageStatusTransition as e:
raise webob.exc.HTTPBadRequest(explanation=e.msg)
@utils.mutating
def reactivate(self, req, image_id):
image_repo = self.gateway.get_repo(req.context)
try:
image = image_repo.get(image_id)
image.reactivate()
image_repo.save(image)
LOG.info(_LI("Image %s is reactivated") % image_id)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.msg)
except exception.Forbidden as e:
raise webob.exc.HTTPForbidden(explanation=e.msg)
except exception.InvalidImageStatusTransition as e:
raise webob.exc.HTTPBadRequest(explanation=e.msg)
class ResponseSerializer(wsgi.JSONResponseSerializer):
def deactivate(self, response, result):
response.status_int = 204
def reactivate(self, response, result):
response.status_int = 204
def create_resource():
"""Image data resource factory method"""
deserializer = None
serializer = ResponseSerializer()
controller = ImageActionsController()
return wsgi.Resource(controller, deserializer, serializer)
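
deactivate/reactivate above delegate the real state checking to the domain image's deactivate()/reactivate() methods, which raise InvalidImageStatusTransition for anything but the active<->deactivated pair. A toy model of that guard (the allowed-transition set is an assumption based on the handlers above):

ALLOWED = {('active', 'deactivated'), ('deactivated', 'active')}

def transition(current, target):
    if (current, target) not in ALLOWED:
        # the controller maps this case to HTTP 400
        raise ValueError("cannot go from %s to %s" % (current, target))
    return target

print(transition('active', 'deactivated'))  # -> deactivated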


@ -0,0 +1,250 @@
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glance_store
from oslo_log import log as logging
from oslo_utils import excutils
import webob.exc
import daisy.api.policy
from daisy.common import exception
from daisy.common import utils
from daisy.common import wsgi
import daisy.db
import daisy.gateway
from daisy import i18n
import daisy.notifier
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
class ImageDataController(object):
def __init__(self, db_api=None, store_api=None,
policy_enforcer=None, notifier=None,
gateway=None):
if gateway is None:
db_api = db_api or daisy.db.get_api()
store_api = store_api or glance_store
policy = policy_enforcer or daisy.api.policy.Enforcer()
notifier = notifier or daisy.notifier.Notifier()
gateway = daisy.gateway.Gateway(db_api, store_api,
notifier, policy)
self.gateway = gateway
def _restore(self, image_repo, image):
"""
Restore the image to queued status.
:param image_repo: The instance of ImageRepo
:param image: The image will be restored
"""
try:
if image_repo and image:
image.status = 'queued'
image_repo.save(image)
except Exception as e:
msg = (_LE("Unable to restore image %(image_id)s: %(e)s") %
{'image_id': image.image_id,
'e': utils.exception_to_str(e)})
LOG.exception(msg)
@utils.mutating
def upload(self, req, image_id, data, size):
image_repo = self.gateway.get_repo(req.context)
image = None
try:
image = image_repo.get(image_id)
image.status = 'saving'
try:
image_repo.save(image)
image.set_data(data, size)
image_repo.save(image, from_state='saving')
except (exception.NotFound, exception.Conflict):
msg = (_("Image %s could not be found after upload. "
"The image may have been deleted during the "
"upload, cleaning up the chunks uploaded.") %
image_id)
LOG.warn(msg)
# NOTE(sridevi): Cleaning up the uploaded chunks.
try:
image.delete()
except exception.NotFound:
# NOTE(sridevi): Ignore this exception
pass
raise webob.exc.HTTPGone(explanation=msg,
request=req,
content_type='text/plain')
except ValueError as e:
LOG.debug("Cannot save data for image %(id)s: %(e)s",
{'id': image_id, 'e': utils.exception_to_str(e)})
self._restore(image_repo, image)
raise webob.exc.HTTPBadRequest(
explanation=utils.exception_to_str(e))
except glance_store.StoreAddDisabled:
msg = _("Error in store configuration. Adding images to store "
"is disabled.")
LOG.exception(msg)
self._restore(image_repo, image)
raise webob.exc.HTTPGone(explanation=msg, request=req,
content_type='text/plain')
except exception.InvalidImageStatusTransition as e:
msg = utils.exception_to_str(e)
LOG.exception(msg)
raise webob.exc.HTTPConflict(explanation=e.msg, request=req)
except exception.Forbidden as e:
msg = ("Not allowed to upload image data for image %s" %
image_id)
LOG.debug(msg)
raise webob.exc.HTTPForbidden(explanation=msg, request=req)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.msg)
except glance_store.StorageFull as e:
msg = _("Image storage media "
"is full: %s") % utils.exception_to_str(e)
LOG.error(msg)
self._restore(image_repo, image)
raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
request=req)
except exception.StorageQuotaFull as e:
msg = _("Image exceeds the storage "
"quota: %s") % utils.exception_to_str(e)
LOG.error(msg)
self._restore(image_repo, image)
raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
request=req)
except exception.ImageSizeLimitExceeded as e:
msg = _("The incoming image is "
"too large: %s") % utils.exception_to_str(e)
LOG.error(msg)
self._restore(image_repo, image)
raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
request=req)
except glance_store.StorageWriteDenied as e:
msg = _("Insufficient permissions on image "
"storage media: %s") % utils.exception_to_str(e)
LOG.error(msg)
self._restore(image_repo, image)
raise webob.exc.HTTPServiceUnavailable(explanation=msg,
request=req)
except webob.exc.HTTPGone as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to upload image data due to HTTP error"))
except webob.exc.HTTPError as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to upload image data due to HTTP error"))
self._restore(image_repo, image)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Failed to upload image data due to "
"internal error"))
self._restore(image_repo, image)
def download(self, req, image_id):
image_repo = self.gateway.get_repo(req.context)
try:
image = image_repo.get(image_id)
if image.status == 'deactivated':
msg = _('The requested image has been deactivated. '
'Image data download is forbidden.')
raise exception.Forbidden(message=msg)
if not image.locations:
raise exception.ImageDataNotFound()
except exception.ImageDataNotFound as e:
raise webob.exc.HTTPNoContent(explanation=e.msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.msg)
except exception.Forbidden as e:
raise webob.exc.HTTPForbidden(explanation=e.msg)
return image
class RequestDeserializer(wsgi.JSONRequestDeserializer):
def upload(self, request):
try:
request.get_content_type(('application/octet-stream',))
except exception.InvalidContentType as e:
raise webob.exc.HTTPUnsupportedMediaType(explanation=e.msg)
image_size = request.content_length or None
return {'size': image_size, 'data': request.body_file}
class ResponseSerializer(wsgi.JSONResponseSerializer):
def download(self, response, image):
offset, chunk_size = 0, None
range_val = response.request.get_content_range()
if range_val:
# NOTE(flaper87): if not present, both, start
# and stop, will be None.
if range_val.start is not None:
offset = range_val.start
if range_val.stop is not None:
chunk_size = range_val.stop - offset
response.headers['Content-Type'] = 'application/octet-stream'
try:
# NOTE(markwash): filesystem store (and maybe others?) cause a
# problem with the caching middleware if they are not wrapped in
# an iterator very strange
response.app_iter = iter(image.get_data(offset=offset,
chunk_size=chunk_size))
except glance_store.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.msg)
except glance_store.RemoteServiceUnavailable as e:
raise webob.exc.HTTPServiceUnavailable(explanation=e.msg)
except (glance_store.StoreGetNotSupported,
glance_store.StoreRandomGetNotSupported) as e:
raise webob.exc.HTTPBadRequest(explanation=e.msg)
except exception.Forbidden as e:
raise webob.exc.HTTPForbidden(explanation=e.msg)
# NOTE(saschpe): "response.app_iter = ..." currently resets Content-MD5
# (https://github.com/Pylons/webob/issues/86), so it should be set
# afterwards for the time being.
if image.checksum:
response.headers['Content-MD5'] = image.checksum
# NOTE(markwash): "response.app_iter = ..." also erroneously resets the
# content-length
response.headers['Content-Length'] = str(image.size)
def upload(self, response, result):
response.status_int = 204
def create_resource():
"""Image data resource factory method"""
deserializer = RequestDeserializer()
serializer = ResponseSerializer()
controller = ImageDataController()
return wsgi.Resource(controller, deserializer, serializer)
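
The Range handling in ResponseSerializer.download turns a parsed Content-Range into the (offset, chunk_size) pair handed to image.get_data(). The same arithmetic with webob's parser directly (the byte range is an example value):

from webob.byterange import ContentRange

range_val = ContentRange.parse('bytes 100-199/500')
offset, chunk_size = 0, None
if range_val:
    if range_val.start is not None:
        offset = range_val.start
    if range_val.stop is not None:
        # webob stores an exclusive stop, so this is a byte count
        chunk_size = range_val.stop - offset
print(offset, chunk_size)  # -> 100 100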

Some files were not shown because too many files have changed in this diff Show More